diff --git a/.github/actions/clean/action.yml b/.github/actions/clean/action.yml index 547738b17cc..8c22523cacf 100644 --- a/.github/actions/clean/action.yml +++ b/.github/actions/clean/action.yml @@ -1,11 +1,23 @@ name: Clean runner description: Clean the runner's temp path on ending +inputs: + images: + description: clean docker images + default: false + type: boolean runs: using: "composite" steps: - - name: Clean + - name: Clean Temp shell: bash run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "${{runner.temp}}" + sudo rm -fr "${{runner.temp}}" + - name: Clean Docker Containers + shell: bash + run: | + docker rm -vf $(docker ps -aq) ||: + - name: Clean Docker Images + if: ${{ inputs.images }} + shell: bash + run: | + docker rmi -f $(docker images -aq) ||: diff --git a/.github/actions/debug/action.yml b/.github/actions/debug/action.yml new file mode 100644 index 00000000000..e1fe3f28024 --- /dev/null +++ b/.github/actions/debug/action.yml @@ -0,0 +1,18 @@ +name: DebugInfo +description: Prints workflow debug info + +runs: + using: "composite" + steps: + - name: Print envs + shell: bash + run: | + echo "::group::Envs" + env + echo "::endgroup::" + - name: Print Event.json + shell: bash + run: | + echo "::group::Event.json" + python3 -m json.tool "$GITHUB_EVENT_PATH" + echo "::endgroup::" diff --git a/.github/workflows/auto_releases.yml b/.github/workflows/auto_releases.yml new file mode 100644 index 00000000000..2fdf4e30a70 --- /dev/null +++ b/.github/workflows/auto_releases.yml @@ -0,0 +1,99 @@ +name: AutoReleases + +env: + PYTHONUNBUFFERED: 1 + +concurrency: + group: autoreleases + +on: + # schedule: + # - cron: '0 9 * * *' + workflow_dispatch: + inputs: + dry-run: + description: 'Dry run' + required: false + default: false + type: boolean + +jobs: + AutoReleaseInfo: + runs-on: [self-hosted, release-maker] + outputs: + data: ${{ steps.info.outputs.AUTO_RELEASE_PARAMS }} + dry_run: ${{ steps.info.outputs.DRY_RUN }} + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + fetch-depth: 0 # full history needed + - name: Debug Info + uses: ./.github/actions/debug + - name: Prepare Info + id: info + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --prepare + echo "::group::Auto Release Info" + python3 -m json.tool /tmp/autorelease_info.json + echo "::endgroup::" + { + echo 'AUTO_RELEASE_PARAMS<> "$GITHUB_OUTPUT" + if [[ "${{ github.event_name }}" == "schedule" ]]; then + echo "DRY_RUN=true" >> "$GITHUB_OUTPUT" + else + echo "DRY_RUN=${{ github.event.inputs.dry-run }}" >> "$GITHUB_OUTPUT" + fi + - name: Post Release Branch statuses + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 auto_release.py --post-status + - name: Clean up + uses: ./.github/actions/clean + + Releases: + needs: AutoReleaseInfo + strategy: + matrix: + release_params: ${{ fromJson(needs.AutoReleaseInfo.outputs.data).releases }} + max-parallel: 1 + name: Release ${{ matrix.release_params.release_branch }} + uses: ./.github/workflows/create_release.yml + with: + ref: ${{ matrix.release_params.commit_sha }} + type: patch + dry-run: ${{ fromJson(needs.AutoReleaseInfo.outputs.dry_run) }} + secrets: + ROBOT_CLICKHOUSE_COMMIT_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} + + CleanUp: + needs: [Releases] + runs-on: [self-hosted, release-maker] + steps: + - uses: 
./.github/actions/clean + with: + images: true + +# PostSlackMessage: +# needs: [Releases] +# runs-on: [self-hosted, release-maker] +# if: ${{ !cancelled() }} +# steps: +# - name: Check out repository code +# uses: ClickHouse/checkout@v1 +# - name: Post +# run: | +# cd "$GITHUB_WORKSPACE/tests/ci" +# python3 auto_release.py --post-auto-release-complete --wf-status ${{ job.status }} diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index d4993b373df..1fb6cb60e96 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -2,6 +2,7 @@ name: CreateRelease concurrency: group: release + 'on': workflow_dispatch: inputs: @@ -26,6 +27,28 @@ concurrency: required: false default: false type: boolean + workflow_call: + inputs: + ref: + description: 'Git reference (branch or commit sha) from which to create the release' + required: true + type: string + type: + description: 'The type of release: "new" for a new release or "patch" for a patch release' + required: true + type: string + only-repo: + description: 'Run only repos updates including docker (repo-recovery, tests)' + required: false + default: false + type: boolean + dry-run: + description: 'Dry run' + required: false + default: false + type: boolean + secrets: + ROBOT_CLICKHOUSE_COMMIT_TOKEN: jobs: CreateRelease: @@ -101,6 +124,7 @@ jobs: --volume=".:/wd" --workdir="/wd" \ clickhouse/style-test \ ./tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \ --jobs=5 \ --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md @@ -129,9 +153,9 @@ jobs: if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-completed git reset --hard HEAD git checkout "$GITHUB_REF_NAME" + python3 ./tests/ci/create_release.py --set-progress-completed - name: Create GH Release if: ${{ inputs.type == 'patch' && ! 
inputs.only-repo }} shell: bash diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index a5cd6321e8c..82826794ea3 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -482,7 +482,7 @@ jobs: if: ${{ !failure() }} run: | # update overall ci report - python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + python3 ./tests/ci/finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - name: Check Workflow results if: ${{ !cancelled() }} run: | @@ -490,5 +490,4 @@ cat > "$WORKFLOW_RESULT_FILE" << 'EOF' ${{ toJson(needs) }} EOF - python3 ./tests/ci/ci_buddy.py --check-wf-status diff --git a/.gitmodules b/.gitmodules index 0a66031de8d..53ebde0cd3b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -108,7 +108,7 @@ url = https://github.com/ClickHouse/icudata [submodule "contrib/icu"] path = contrib/icu - url = https://github.com/unicode-org/icu + url = https://github.com/ClickHouse/icu [submodule "contrib/flatbuffers"] path = contrib/flatbuffers url = https://github.com/ClickHouse/flatbuffers @@ -345,9 +345,6 @@ [submodule "contrib/FP16"] path = contrib/FP16 url = https://github.com/Maratyszcza/FP16.git -[submodule "contrib/robin-map"] - path = contrib/robin-map - url = https://github.com/Tessil/robin-map.git [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 1793fd14ccd..93dcfa18999 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ### Table of Contents +**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**
**[ClickHouse release v24.7, 2024-07-30](#247)**
**[ClickHouse release v24.6, 2024-07-01](#246)**
**[ClickHouse release v24.5, 2024-05-30](#245)**
@@ -10,6 +11,153 @@ # 2024 Changelog +### ClickHouse release 24.8 LTS, 2024-08-20 + +#### Backward Incompatible Change +* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery/-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)). +* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)). +* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. 
[#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)). + +#### New Feature +* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)). +* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)). +* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)). +* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both left and right table, e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)). +* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)). +* Added a new server setting, `disable_insertion_and_mutation`. If it is enabled, the server will deny all insertions and mutations. This includes asynchronous INSERTs. This setting can be used to create read-only replicas. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)). +* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)). +* Added support for reading `MULTILINESTRING` geometry in `WKT` format using function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)). +* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`.
[#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)). +* Add a query `ALTER TABLE ... DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)). +* Add the `rows_before_aggregation_at_least` statistic to the query response when a new setting, `rows_before_aggregation`, is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)). +* Support `OPTIMIZE` query on `Join` tables to reduce their memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)). +* Allow running a query instantly in Play if you add `&run=1` to the URL. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)). + +#### Experimental Feature +* Implement a new `JSON` data type. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)). +* Add the new `TimeSeries` table engine. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)). +* Add new experimental `Kafka` storage engine to store offsets in Keeper instead of relying on committing them to Kafka. It makes the commit to ClickHouse tables atomic with regard to consumption from the queue. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)). +* Added statistics type `count_min` (count-min sketches) which provides selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datetime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)). + +#### Performance Improvement +* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)). +* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)). +* Improve columns squashing (an operation happening in INSERT queries) for `String`/`Array`/`Map`/`Variant`/`Dynamic` types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)). +* Speed up `SYSTEM FLUSH LOGS` and flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)). +* Improved overall performance of merges by reducing the overhead of the scheduling steps of merges.
[#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)). +* Speed up tables removal for `DROP DATABASE` query, increased the default value for `database_catalog_drop_table_concurrency` to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Avoid allocating too much capacity for array column while writing ORC. Performance speeds up 15% for an Array column. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)). +* Speed up mutations for non-replicated MergeTree significantly [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) [#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Improvement +* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in a form of an alias. This signifies that Analyzer is no longer in beta and is fully promoted to production. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Improve schema inference of date times. Now DateTime64 used only when date time has fractional part, otherwise regular DateTime is used. Inference of Date/DateTime is more strict now, especially when `date_time_input_format='best_effort'` to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)). +* ClickHouse server now supports new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server it works in tandem with `keep_alive_timeout` - if idle timeout not expired but there already more than `max_keep_alive_requests` requests done through the given connection - it will be closed by the server. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)). +* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Do not require a grant for REMOTE when creating a Distributed table: a grant for the Distributed engine is enough. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)). +* Do not pass logs for keeper explicitly in the Docker image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)). +* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, allowing to create and restore incremental backups to/from password protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)). +* Ignore `async_load_databases` for `ATTACH` query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)). +* Added logs and metrics for rejected connections (where there are not enough resources). 
[#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support proper `UUID` type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)). +* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Add `DiskS3NoSuchKeyErrors` metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Ensure the `COMMENT` clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)). +* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)). +* Read configuration for `clickhouse-local` from `~/.clickhouse-local`. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)). +* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)). +* Decrease level of the `Socket is not connected` error when HTTP connection immediately reset by peer after connecting, close [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)). +* Add ability to load dashboards for `system.dashboards` from config (once set, they overrides the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)). +* The window functions in SQL are traditionally in snake case. ClickHouse uses `camelCase`, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called the exact same as the original `dense_rank()` and `percent_rank()` functions. Both snake case and camelCase syntaxes remain usable. A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042) . [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)). +* Autodetect configuration file format if is not `.xml`, `.yml` or `.yaml`. If the file begins with < it might be XML, otherwise it might be YAML. It is useful when providing a configuration file from a pipe: `clickhouse-server --config-file <(echo "hello: world")`. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)). +* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). 
[#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)). +* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. [#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)). +* Add `-no-pie` to Aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)). +* Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)). +* Fix settings and `current_database` in `system.processes` for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)). +* Remove unnecessary logs for non-replicated `MergeTree`. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)). + +#### Build/Testing/Packaging Improvement +* Integration tests flaky check will not run each test case multiple times to find more issues in tests and make them more reliable. It is using `pytest-repeat` library to run test case multiple times for the same environment. It is important to cleanup tables and other entities in the end of a test case to pass. Repeating works much faster than several pytest runs as it starts necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)). +* Unblock the usage of CLion with ClickHouse. In previous versions, CLion freezed for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* getauxval: avoid a crash under a sanitizer re-exec due to high ASLR entropy in newer Linux kernels. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)). +* Some parts of client code are extracted to a single file and highest possible level optimization is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### Bug Fix +* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). +* Fix crash of `uniq` and `uniqTheta ` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)). +* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Fix potential stack overflow in `JSONMergePatch` function. 
Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)). +* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)). +* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)). +* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)). +* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. [#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)). +* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)). +* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Fix invalid format detection in schema inference that could lead to logical error Format {} doesn't support schema inference. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)). +* Forbid create as select even when database_replicated_allow_heavy_create is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)). +* Reading from the `numbers` could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). 
[#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). [#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix the memory leak for count distinct, when exception issued during group by single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)). +* Fix an error in optimization which converts OUTER JOIN to INNER JOIN. This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix hung query when using empty tuple as lhs of function IN. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)). +* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix one more case when a non-deterministic function is specified in PK. 
[#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)). +* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)). +* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)). +* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is considered and now window functions without window frame definition in sql can be put into different `WindowTransfomer`s properly. 
[#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)). +* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix possible logical error "Unexpected return type from if" with experimental Variant type and enabled setting `use_variant_as_common_type ` in function if with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)). +* Due to a bug in Linux Kernel, a query can hung in `TimerDescriptor::drain`. This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)). +* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)). +* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)). +* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)). +* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)). +* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)). +* Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)). +* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)). 
+* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)). + + ### ClickHouse release 24.7, 2024-07-30 #### Backward Incompatible Change diff --git a/CMakeLists.txt b/CMakeLists.txt index 884d5be42de..6abf48a6927 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -322,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER) set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer") endif() +# Before you start hating your debugger because it refuses to show variables (''), try building with -DDEBUG_O_LEVEL="0" +# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263 +set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") +set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") -set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") -set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") +set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") if (OS_DARWIN) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") @@ -601,7 +605,9 @@ if (NATIVE_BUILD_TARGETS execute_process( COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} @@ -613,9 +619,13 @@ if (NATIVE_BUILD_TARGETS "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}" ${PROJECT_SOURCE_DIR} WORKING_DIRECTORY "${NATIVE_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS} - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) endif () diff --git a/PreLoad.cmake b/PreLoad.cmake index e0fd37b2fd6..92b221c9f63 100644 --- a/PreLoad.cmake +++ b/PreLoad.cmake @@ -51,8 +51,14 @@ if (NOT "$ENV{CFLAGS}" STREQUAL "" endif() # Default toolchain - this is needed to avoid dependency on OS 
files. -execute_process(COMMAND uname -s OUTPUT_VARIABLE OS) -execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH) +execute_process(COMMAND uname -s + OUTPUT_VARIABLE OS + COMMAND_ERROR_IS_FATAL ANY +) +execute_process(COMMAND uname -m + OUTPUT_VARIABLE ARCH + COMMAND_ERROR_IS_FATAL ANY +) # By default, prefer clang on Linux # But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER. diff --git a/README.md b/README.md index 2120a4d1211..5e66b9da73e 100644 --- a/README.md +++ b/README.md @@ -34,13 +34,19 @@ curl https://clickhouse.com/ | sh Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know. -* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29 +* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 20 ## Upcoming Events Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `` clickhouse `` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc. -* MORE COMING SOON! +The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov: + +* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25 +* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5 +* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/clickhouse-nc-meetup-group/events/302557230) - September 9 +* [New York Meetup (Ramp)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10 +* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12 ## Recent Recordings * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" diff --git a/SECURITY.md b/SECURITY.md index 8930dc96f8a..93c48f1d9ba 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,25 +14,15 @@ The following versions of ClickHouse server are currently supported with securit | Version | Supported | |:-|:-| +| 24.8 | ✔️ | | 24.7 | ✔️ | | 24.6 | ✔️ | -| 24.5 | ✔️ | +| 24.5 | ❌ | | 24.4 | ❌ | | 24.3 | ✔️ | | 24.2 | ❌ | | 24.1 | ❌ | -| 23.12 | ❌ | -| 23.11 | ❌ | -| 23.10 | ❌ | -| 23.9 | ❌ | -| 23.8 | ✔️ | -| 23.7 | ❌ | -| 23.6 | ❌ | -| 23.5 | ❌ | -| 23.4 | ❌ | -| 23.3 | ❌ | -| 23.2 | ❌ | -| 23.1 | ❌ | +| 23.* | ❌ | | 22.* | ❌ | | 21.* | ❌ | | 20.* | ❌ | diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index 247028b96e0..3d236f52c36 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -8,6 +8,8 @@ endif () # when instantiated from JSON.cpp. Try again when libcxx(abi) and Clang are upgraded to 16. 
set (CMAKE_CXX_STANDARD 20) +configure_file(GitHash.cpp.in GitHash.generated.cpp) + set (SRCS argsToConfig.cpp cgroupsv2.cpp @@ -33,6 +35,7 @@ set (SRCS safeExit.cpp throwError.cpp Numa.cpp + GitHash.generated.cpp ) add_library (common ${SRCS}) diff --git a/src/Daemon/GitHash.cpp.in b/base/base/GitHash.cpp.in similarity index 100% rename from src/Daemon/GitHash.cpp.in rename to base/base/GitHash.cpp.in diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp index 87f62bf377d..e0e37c8729b 100644 --- a/base/base/cgroupsv2.cpp +++ b/base/base/cgroupsv2.cpp @@ -27,27 +27,6 @@ bool cgroupsV2Enabled() #endif } -bool cgroupsV2MemoryControllerEnabled() -{ -#if defined(OS_LINUX) - chassert(cgroupsV2Enabled()); - /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available - /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file - /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file. - fs::path cgroup_dir = cgroupV2PathOfProcess(); - if (cgroup_dir.empty()) - return false; - std::ifstream controllers_file(cgroup_dir / "cgroup.controllers"); - if (!controllers_file.is_open()) - return false; - std::string controllers; - std::getline(controllers_file, controllers); - return controllers.find("memory") != std::string::npos; -#else - return false; -#endif -} - fs::path cgroupV2PathOfProcess() { #if defined(OS_LINUX) @@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess() return {}; #endif } + +std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name) +{ +#if defined(OS_LINUX) + if (!cgroupsV2Enabled()) + return {}; + + fs::path current_cgroup = cgroupV2PathOfProcess(); + if (current_cgroup.empty()) + return {}; + + /// Return the bottom-most nested file. If there is no such file at the current + /// level, try again at the parent level as settings are inherited. + while (current_cgroup != default_cgroups_mount.parent_path()) + { + const auto path = current_cgroup / file_name; + if (fs::exists(path)) + return {current_cgroup}; + current_cgroup = current_cgroup.parent_path(); + } + return {}; +#else + return {}; +#endif +} diff --git a/base/base/cgroupsv2.h b/base/base/cgroupsv2.h index cfb916ff358..a6276474254 100644 --- a/base/base/cgroupsv2.h +++ b/base/base/cgroupsv2.h @@ -1,6 +1,7 @@ #pragma once #include +#include #if defined(OS_LINUX) /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers). @@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou /// Is cgroups v2 enabled on the system? bool cgroupsV2Enabled(); -/// Is the memory controller of cgroups v2 enabled on the system? -/// Assumes that cgroupsV2Enabled() is enabled. -bool cgroupsV2MemoryControllerEnabled(); - /// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup. /// Returns an empty path the cgroup cannot be determined. /// Assumes that cgroupsV2Enabled() is enabled. std::filesystem::path cgroupV2PathOfProcess(); + +/// Returns the most nested cgroup dir containing the specified file. +/// If cgroups v2 is not enabled - returns an empty optional. 
+std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name); diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp index 03aab1eac72..bbfbecdbffd 100644 --- a/base/base/getMemoryAmount.cpp +++ b/base/base/getMemoryAmount.cpp @@ -19,9 +19,6 @@ std::optional getCgroupsV2MemoryLimit() if (!cgroupsV2Enabled()) return {}; - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - std::filesystem::path current_cgroup = cgroupV2PathOfProcess(); if (current_cgroup.empty()) return {}; diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index d69646d3694..c82038804fe 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54489) +SET(VERSION_REVISION 54490) SET(VERSION_MAJOR 24) -SET(VERSION_MINOR 8) +SET(VERSION_MINOR 9) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af) -SET(VERSION_DESCRIBE v24.8.1.1-testing) -SET(VERSION_STRING 24.8.1.1) +SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608) +SET(VERSION_DESCRIBE v24.9.1.1-testing) +SET(VERSION_STRING 24.9.1.1) # end of autochange diff --git a/cmake/freebsd/default_libs.cmake b/cmake/freebsd/default_libs.cmake index 6bde75f8c9a..3f5b3829877 100644 --- a/cmake/freebsd/default_libs.cmake +++ b/cmake/freebsd/default_libs.cmake @@ -9,10 +9,18 @@ endif () file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/") message(STATUS "-Bprefix: ${bprefix}") -execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) +execute_process(COMMAND + ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a + OUTPUT_VARIABLE BUILTINS_LIBRARY + COMMAND_ERROR_IS_FATAL ANY + OUTPUT_STRIP_TRAILING_WHITESPACE) # --print-file-name simply prints what you passed in case of nothing was resolved, so let's try one other possible option if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a") - execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) + execute_process(COMMAND + ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a + OUTPUT_VARIABLE BUILTINS_LIBRARY + COMMAND_ERROR_IS_FATAL ANY + OUTPUT_STRIP_TRAILING_WHITESPACE) endif() if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a") message(FATAL_ERROR "libclang_rt.builtins had not been found") diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake index 17d8dd42a2c..8e48fc9b9d8 100644 --- a/cmake/limit_jobs.cmake +++ b/cmake/limit_jobs.cmake @@ -42,19 +42,9 @@ endif () # But use 2 parallel jobs, since: # - this is what llvm does # - and I've verfied that lld-11 does not use all available CPU time (in peak) while linking one binary -if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO) - if (ARCH_AARCH64) - # aarch64 builds start to often fail with OOMs (reason not yet clear), for now let's limit the concurrency - message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.") - set (PARALLEL_LINK_JOBS 1) - if 
(LINKER_NAME MATCHES "lld") - math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4) - set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}") - endif() - elseif (PARALLEL_LINK_JOBS GREATER 2) - message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") - set (PARALLEL_LINK_JOBS 2) - endif () +if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2) + message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.") + set (PARALLEL_LINK_JOBS 2) endif() message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).") diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake index 4a06243243e..51620bc9f33 100644 --- a/cmake/linux/default_libs.cmake +++ b/cmake/linux/default_libs.cmake @@ -5,7 +5,11 @@ set (DEFAULT_LIBS "-nodefaultlibs") # We need builtins from Clang's RT even without libcxx - for ubsan+int128. # See https://bugs.llvm.org/show_bug.cgi?id=16404 -execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE) +execute_process (COMMAND + ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt + OUTPUT_VARIABLE BUILTINS_LIBRARY + COMMAND_ERROR_IS_FATAL ANY + OUTPUT_STRIP_TRAILING_WHITESPACE) # Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both. 
if (SANITIZE STREQUAL undefined) diff --git a/cmake/tools.cmake b/cmake/tools.cmake index 7aa5d4c51ce..5c7da54b779 100644 --- a/cmake/tools.cmake +++ b/cmake/tools.cmake @@ -5,7 +5,11 @@ if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") endif () # Print details to output -execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION OUTPUT_STRIP_TRAILING_WHITESPACE) +execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version + OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION + COMMAND_ERROR_IS_FATAL ANY + OUTPUT_STRIP_TRAILING_WHITESPACE +) message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}") # Require minimum compiler versions diff --git a/cmake/utils.cmake b/cmake/utils.cmake index a318408098a..a99d8e050a8 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -90,7 +90,10 @@ endfunction() # Function get_cmake_properties returns list of all propreties that cmake supports function(get_cmake_properties outvar) - execute_process(COMMAND cmake --help-property-list OUTPUT_VARIABLE cmake_properties) + execute_process(COMMAND cmake --help-property-list + OUTPUT_VARIABLE cmake_properties + COMMAND_ERROR_IS_FATAL ANY + ) # Convert command output into a CMake list string(REGEX REPLACE ";" "\\\\;" cmake_properties "${cmake_properties}") string(REGEX REPLACE "\n" ";" cmake_properties "${cmake_properties}") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index dc2ad2a3150..d7489bc5c0e 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -209,9 +209,8 @@ endif() option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES}) if (ENABLE_USEARCH) add_contrib (FP16-cmake FP16) - add_contrib (robin-map-cmake robin-map) add_contrib (SimSIMD-cmake SimSIMD) - add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD + add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD else () message(STATUS "Not using USearch") endif () diff --git a/contrib/SimSIMD b/contrib/SimSIMD index de2cb75b9e9..91a76d1ac51 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf +Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26 diff --git a/contrib/aws b/contrib/aws index 1c2946bfcb7..d5450d76abd 160000 --- a/contrib/aws +++ b/contrib/aws @@ -1 +1 @@ -Subproject commit 1c2946bfcb7f1e3ae0a858de0b59d4f1a7b4ccaf +Subproject commit d5450d76abda556ce145ddabe7e0cc6a7644ec59 diff --git a/contrib/aws-crt-cpp b/contrib/aws-crt-cpp index f532d6abc0d..e5aa45cacfd 160000 --- a/contrib/aws-crt-cpp +++ b/contrib/aws-crt-cpp @@ -1 +1 @@ -Subproject commit f532d6abc0d2b0d8b5d6fe9e7c51eaedbe4afbd0 +Subproject commit e5aa45cacfdcda7719ead38760e7c61076f5745f diff --git a/contrib/cctz-cmake/CMakeLists.txt b/contrib/cctz-cmake/CMakeLists.txt index 7161f743de1..fadf948b053 100644 --- a/contrib/cctz-cmake/CMakeLists.txt +++ b/contrib/cctz-cmake/CMakeLists.txt @@ -37,7 +37,9 @@ message(STATUS "Packaging with tzdata version: ${TZDATA_VERSION}") execute_process(COMMAND bash -c "cd ${TZDIR} && find * -type f -and ! -name '*.tab' -and ! 
-name 'localtime' | LC_ALL=C sort | paste -sd ';' -" OUTPUT_STRIP_TRAILING_WHITESPACE - OUTPUT_VARIABLE TIMEZONES) + OUTPUT_VARIABLE TIMEZONES + COMMAND_ERROR_IS_FATAL ANY +) file(APPEND ${TIMEZONES_FILE} "// autogenerated by ClickHouse/contrib/cctz-cmake/CMakeLists.txt\n") file(APPEND ${TIMEZONES_FILE} "#include \n") diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt index e44f737cfc3..f1a744f851f 100644 --- a/contrib/google-protobuf-cmake/CMakeLists.txt +++ b/contrib/google-protobuf-cmake/CMakeLists.txt @@ -359,7 +359,9 @@ else () execute_process( COMMAND mkdir -p ${PROTOC_BUILD_DIR} - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} @@ -375,11 +377,15 @@ else () "-DABSL_ENABLE_INSTALL=0" "${protobuf_source_dir}" WORKING_DIRECTORY "${PROTOC_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) endif () add_executable(protoc IMPORTED GLOBAL) diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt index 1c0bf41ff78..975774d1990 100644 --- a/contrib/grpc-cmake/CMakeLists.txt +++ b/contrib/grpc-cmake/CMakeLists.txt @@ -51,8 +51,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME set(OPENSSL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openssl-cmake") execute_process( - COMMAND mkdir -p ${OPENSSL_BUILD_DIR} - COMMAND_ECHO STDOUT + COMMAND mkdir -p ${OPENSSL_BUILD_DIR} + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY ) if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "amd64|x86_64") @@ -89,15 +90,21 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME "-DClickHouse_SOURCE_DIR=${ClickHouse_SOURCE_DIR}" "${OPENSSL_SOURCE_DIR}" WORKING_DIRECTORY "${OPENSSL_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} --build "${OPENSSL_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} --install "${OPENSSL_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) # It's not important on which file we depend, we just want to specify right order add_library(openssl_for_grpc STATIC IMPORTED GLOBAL) @@ -108,8 +115,9 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME set (GRPC_CPP_PLUGIN_BUILD_DIR "${_gRPC_BINARY_DIR}/build") execute_process( - COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR} - COMMAND_ECHO STDOUT + COMMAND mkdir -p ${GRPC_CPP_PLUGIN_BUILD_DIR} + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY ) set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp") @@ -140,11 +148,15 @@ if (NOT CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME "-DgRPC_SSL_PROVIDER=package" "${_gRPC_SOURCE_DIR}" WORKING_DIRECTORY "${GRPC_CPP_PLUGIN_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) execute_process( COMMAND ${CMAKE_COMMAND} --build "${GRPC_CPP_PLUGIN_BUILD_DIR}" - COMMAND_ECHO STDOUT) + COMMAND_ECHO STDOUT + COMMAND_ERROR_IS_FATAL ANY + ) add_executable(grpc_cpp_plugin IMPORTED GLOBAL) set_target_properties (grpc_cpp_plugin PROPERTIES IMPORTED_LOCATION "${GRPC_CPP_PLUGIN_BUILD_DIR}/grpc_cpp_plugin") diff --git a/contrib/icu b/contrib/icu index 7750081bda4..4216173eeeb 160000 --- a/contrib/icu +++ b/contrib/icu @@ -1 +1 @@ 
-Subproject commit 7750081bda4b3bc1768ae03849ec70f67ea10625 +Subproject commit 4216173eeeb39c1d4caaa54a68860e800412d273 diff --git a/contrib/robin-map b/contrib/robin-map deleted file mode 160000 index 851a59e0e30..00000000000 --- a/contrib/robin-map +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d diff --git a/contrib/robin-map-cmake/CMakeLists.txt b/contrib/robin-map-cmake/CMakeLists.txt deleted file mode 100644 index f82ad705dcc..00000000000 --- a/contrib/robin-map-cmake/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -# See contrib/usearch-cmake/CMakeLists.txt diff --git a/contrib/usearch b/contrib/usearch index 30810452bec..e21a5778a0d 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356 +Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4 diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 6be622275ae..df131e0c528 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -1,5 +1,4 @@ set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16") -set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map") set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch") @@ -7,8 +6,17 @@ add_library(_usearch INTERFACE) target_include_directories(_usearch SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include - ${ROBIN_MAP_PROJECT_DIR}/include ${SIMSIMD_PROJECT_DIR}/include ${USEARCH_PROJECT_DIR}/include) +target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB) + +# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) +# ^^ simsimd is not enabled at the moment. Reasons: +# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW. +# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD +# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of +# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86 +# and ARM machines ... certain combinations of quantizations / distance functions / SIMD instructions are not implemented at the moment.
+ add_library(ch_contrib::usearch ALIAS _usearch) diff --git a/docker/images.json b/docker/images.json index 716b76ee217..055394b69e6 100644 --- a/docker/images.json +++ b/docker/images.json @@ -47,8 +47,7 @@ "docker/test/stateful": { "name": "clickhouse/stateful-test", "dependent": [ - "docker/test/stress", - "docker/test/upgrade" + "docker/test/stress" ] }, "docker/test/unit": { @@ -59,10 +58,6 @@ "name": "clickhouse/stress-test", "dependent": [] }, - "docker/test/upgrade": { - "name": "clickhouse/upgrade-check", - "dependent": [] - }, "docker/test/integration/runner": { "name": "clickhouse/integration-tests-runner", "dependent": [] diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index a44664259fb..fc93cee5bbc 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.3.42" +ARG VERSION="24.8.1.2684" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 2565828c846..3ceaf2a08b4 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.7.3.42" +ARG VERSION="24.8.1.2684" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 5ac8a58afea..76db997821c 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.7.3.42" +ARG VERSION="24.8.1.2684" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 5d311c673a4..ca93b24f66e 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -93,6 +93,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV COMMIT_SHA='' ENV PULL_REQUEST_NUMBER='' ENV COPY_CLICKHOUSE_BINARY_TO_OUTPUT=0 - -COPY run.sh / -CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/sqllogic/Dockerfile b/docker/test/sqllogic/Dockerfile index 1425e12cd84..6397526388e 100644 --- a/docker/test/sqllogic/Dockerfile +++ b/docker/test/sqllogic/Dockerfile @@ -35,7 +35,6 @@ RUN mkdir -p /tmp/clickhouse-odbc-tmp \ ENV TZ=Europe/Amsterdam -ENV MAX_RUN_TIME=9000 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git" diff --git a/docker/test/sqllogic/run.sh b/docker/test/sqllogic/run.sh index ccba344035e..32368980f9b 100755 --- a/docker/test/sqllogic/run.sh +++ b/docker/test/sqllogic/run.sh @@ -94,7 +94,7 @@ function run_tests() export -f run_tests -timeout "${MAX_RUN_TIME:-9000}" bash -c run_tests || echo "timeout reached" >&2 +run_tests #/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv diff --git 
a/docker/test/sqltest/Dockerfile b/docker/test/sqltest/Dockerfile index 71d915b0c7a..b805bb03c2b 100644 --- a/docker/test/sqltest/Dockerfile +++ b/docker/test/sqltest/Dockerfile @@ -22,7 +22,6 @@ ARG sqltest_repo="https://github.com/elliotchance/sqltest/" RUN git clone ${sqltest_repo} ENV TZ=UTC -ENV MAX_RUN_TIME=900 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone COPY run.sh / diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile index 0daf88cad7e..9aa936cb069 100644 --- a/docker/test/stateful/Dockerfile +++ b/docker/test/stateful/Dockerfile @@ -10,7 +10,3 @@ RUN apt-get update -y \ npm \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* - -COPY create.sql / -COPY run.sh / -CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/stateful/setup_minio.sh b/docker/test/stateful/setup_minio.sh deleted file mode 120000 index 0d539f72cb3..00000000000 --- a/docker/test/stateful/setup_minio.sh +++ /dev/null @@ -1 +0,0 @@ -../stateless/setup_minio.sh \ No newline at end of file diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index d8eb072328f..69f81b35a95 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -65,7 +65,6 @@ ENV TZ=Europe/Amsterdam RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ENV NUM_TRIES=1 -ENV MAX_RUN_TIME=0 # Unrelated to vars in setup_minio.sh, but should be the same there # to have the same binaries for local running scenario @@ -86,18 +85,6 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo ENV MINIO_ROOT_USER="clickhouse" ENV MINIO_ROOT_PASSWORD="clickhouse" ENV EXPORT_S3_STORAGE_POLICIES=1 -ENV CLICKHOUSE_GRPC_CLIENT="/usr/share/clickhouse-utils/grpc-client/clickhouse-grpc-client.py" RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node - -COPY run.sh / -COPY setup_minio.sh / -COPY setup_hdfs_minicluster.sh / -COPY attach_gdb.lib / -COPY utils.lib / - -# We store stress_tests.lib in stateless image to avoid duplication of this file in stress and upgrade tests -COPY stress_tests.lib / - -CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile index 0f81a1cd07f..ecb98a4e3ed 100644 --- a/docker/test/stress/Dockerfile +++ b/docker/test/stress/Dockerfile @@ -22,8 +22,5 @@ RUN apt-get update -y \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* -COPY run.sh / - ENV EXPORT_S3_STORAGE_POLICIES=1 -CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/upgrade/Dockerfile b/docker/test/upgrade/Dockerfile deleted file mode 100644 index 78d912fd031..00000000000 --- a/docker/test/upgrade/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# rebuild in #33610 -# docker build -t clickhouse/upgrade-check . 
-ARG FROM_TAG=latest -FROM clickhouse/stateful-test:$FROM_TAG - -RUN apt-get update -y \ - && env DEBIAN_FRONTEND=noninteractive \ - apt-get install --yes --no-install-recommends \ - bash \ - tzdata \ - parallel \ - expect \ - python3 \ - python3-lxml \ - python3-termcolor \ - python3-requests \ - curl \ - sudo \ - openssl \ - netcat-openbsd \ - brotli \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* - -COPY run.sh / - -ENV EXPORT_S3_STORAGE_POLICIES=1 - -CMD ["/bin/bash", "/run.sh"] diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index 8b949ed95db..6b9fb94a4c6 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -56,7 +56,5 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* -COPY process_functional_tests_result.py / - COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/docs/changelogs/v24.3.7.30-lts.md b/docs/changelogs/v24.3.7.30-lts.md new file mode 100644 index 00000000000..f945a54840f --- /dev/null +++ b/docs/changelogs/v24.3.7.30-lts.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.7.30-lts (c8a28cf4331) FIXME as compared to v24.3.6.48-lts (b2d33c3c45d) + +#### Improvement +* Backported in [#68103](https://github.com/ClickHouse/ClickHouse/issues/68103): Distinguish booleans and integers while parsing values for custom settings: ``` SET custom_a = true; SET custom_b = 1; ```. [#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) ([Vitaly Baranov](https://github.com/vitlibar)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#67931](https://github.com/ClickHouse/ClickHouse/issues/67931): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#68062](https://github.com/ClickHouse/ClickHouse/issues/68062): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)). +* Backported in [#67812](https://github.com/ClickHouse/ClickHouse/issues/67812): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67848](https://github.com/ClickHouse/ClickHouse/issues/67848): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#68271](https://github.com/ClickHouse/ClickHouse/issues/68271): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67806](https://github.com/ClickHouse/ClickHouse/issues/67806): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). 
+* Backported in [#67834](https://github.com/ClickHouse/ClickHouse/issues/67834): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#68206](https://github.com/ClickHouse/ClickHouse/issues/68206): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#68089](https://github.com/ClickHouse/ClickHouse/issues/68089): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#68120](https://github.com/ClickHouse/ClickHouse/issues/68120): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Update version after release. [#67676](https://github.com/ClickHouse/ClickHouse/pull/67676) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Backported in [#68074](https://github.com/ClickHouse/ClickHouse/issues/68074): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)). + diff --git a/docs/changelogs/v24.3.8.13-lts.md b/docs/changelogs/v24.3.8.13-lts.md new file mode 100644 index 00000000000..6fbceacd624 --- /dev/null +++ b/docs/changelogs/v24.3.8.13-lts.md @@ -0,0 +1,16 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.8.13-lts (84bbfc70f5d) FIXME as compared to v24.3.7.30-lts (c8a28cf4331) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#68562](https://github.com/ClickHouse/ClickHouse/issues/68562): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#68114](https://github.com/ClickHouse/ClickHouse/issues/68114): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67989](https://github.com/ClickHouse/ClickHouse/issues/67989): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#68335](https://github.com/ClickHouse/ClickHouse/issues/68335): Try fix postgres crash when query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)). 
+* Backported in [#68392](https://github.com/ClickHouse/ClickHouse/issues/68392): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)). + diff --git a/docs/changelogs/v24.8.1.2684-lts.md b/docs/changelogs/v24.8.1.2684-lts.md new file mode 100644 index 00000000000..8171bb3d719 --- /dev/null +++ b/docs/changelogs/v24.8.1.2684-lts.md @@ -0,0 +1,525 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.8.1.2684-lts (161c62fd295) FIXME as compared to v24.8.1.1-new (3f8b27d7acc) + +#### Backward Incompatible Change +* `clickhouse-client` and `clickhouse-local` now default to multi-query mode (instead single-query mode). As an example, `clickhouse-client -q "SELECT 1; SELECT 2"` now works, whereas users previously had to add `--multiquery` (or `-n`). The `--multiquery/-n` switch became obsolete. INSERT queries in multi-query statements are treated specially based on their FORMAT clause: If the FORMAT is `VALUES` (the most common case), the end of the INSERT statement is represented by a trailing semicolon `;` at the end of the query. For all other FORMATs (e.g. `CSV` or `JSONEachRow`), the end of the INSERT statement is represented by two newlines `\n\n` at the end of the query. [#63898](https://github.com/ClickHouse/ClickHouse/pull/63898) ([FFish](https://github.com/wxybear)). +* In previous versions, it was possible to use an alternative syntax for `LowCardinality` data types by appending `WithDictionary` to the name of the data type. It was an initial working implementation, and it was never documented or exposed to the public. Now, it is deprecated. If you have used this syntax, you have to ALTER your tables and rename the data types to `LowCardinality`. [#66842](https://github.com/ClickHouse/ClickHouse/pull/66842) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix logical errors with storage `Buffer` used with distributed destination table. It's a backward incompatible change: queries using `Buffer` with a distributed destination table may stop working if the table appears more than once in the query (e.g., in a self-join). [#67015](https://github.com/ClickHouse/ClickHouse/pull/67015) ([vdimir](https://github.com/vdimir)). +* In previous versions, calling functions for random distributions based on the Gamma function (such as Chi-Squared, Student, Fisher) with negative arguments close to zero led to a long computation or an infinite loop. In the new version, calling these functions with zero or negative arguments will produce an exception. This closes [#67297](https://github.com/ClickHouse/ClickHouse/issues/67297). [#67326](https://github.com/ClickHouse/ClickHouse/pull/67326) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)). 
+* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement Dynamic type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into Dynamic column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)). + +#### New Feature +* Add new experimental Kafka storage engine to store offsets in Keeper instead of relying on committing them to Kafka. [#57625](https://github.com/ClickHouse/ClickHouse/pull/57625) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Add new TimeSeries table engine: - by default: ``` CREATE TABLE tbl ENGINE=TimeSeries ``` - or with specifying engines of its internal tables:. [#64183](https://github.com/ClickHouse/ClickHouse/pull/64183) ([Vitaly Baranov](https://github.com/vitlibar)). +* Support more join strictnesses (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequal conditions which involve columns from both left and right table. e.g. t1.y < t2.y (see setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)). +* Add `_etag` virtual column for S3 table engine. Fixes [#65312](https://github.com/ClickHouse/ClickHouse/issues/65312). [#65386](https://github.com/ClickHouse/ClickHouse/pull/65386) ([skyoct](https://github.com/skyoct)). +* This pull request introduces Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. The follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Add function printf for spark compatiability. [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)). +* Backported in [#68450](https://github.com/ClickHouse/ClickHouse/issues/68450): Implement new JSON data type. [#66444](https://github.com/ClickHouse/ClickHouse/pull/66444) ([Kruglov Pavel](https://github.com/Avogar)). +* Add a new server setting: disable_insertion_and_mutation Set it to true. This node will deny all insertions and mutations(Alter table delete/update/drop partition). Include async insertion. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)). +* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table_engines to Null engine that can be useful for testing. It should work for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)). +* Added support for reading MULTILINESTRING geometry in WKT format using function readWKTLineString. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)). +* Add a new table function `fuzzQuery`. 
This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)). +* Support query `DROP DETACHED PARTITION ALL` to drop all detached partitions. [#67885](https://github.com/ClickHouse/ClickHouse/pull/67885) ([Duc Canh Le](https://github.com/canhld94)). +* Added a tagging (namespace) mechanism for the query cache. The same queries with different tags are considered different by the query cache. Example: `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'abc'` and `SELECT 1 SETTINGS use_query_cache = 1, query_cache_tag = 'def'` now create different query cache entries. [#68235](https://github.com/ClickHouse/ClickHouse/pull/68235) ([sakulali](https://github.com/sakulali)). + +#### Performance Improvement +* Use adaptive read task size calculation method (adaptive meaning it depends on read column sizes) for parallel replicas. [#60377](https://github.com/ClickHouse/ClickHouse/pull/60377) ([Nikita Taranov](https://github.com/nickitat)). +* Store the `plain_rewritable` disk directory metadata in `__meta` layout, separately from the merge tree data in the object storage. Move the `plain_rewritable` disk to a flat directory structure. [#65751](https://github.com/ClickHouse/ClickHouse/pull/65751) ([Julia Kartseva](https://github.com/jkartseva)). +* Enable `compile_expressions` (JIT compiler for fragments of ordinary expressions) by default. This closes [#51264](https://github.com/ClickHouse/ClickHouse/issues/51264) and [#56386](https://github.com/ClickHouse/ClickHouse/issues/56386). [#66486](https://github.com/ClickHouse/ClickHouse/pull/66486) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve columns squashing for String/Array/Map/Variant/Dynamic types by reserving required memory in advance for all subcolumns. [#67043](https://github.com/ClickHouse/ClickHouse/pull/67043) ([Kruglov Pavel](https://github.com/Avogar)). +* Speed up system flush logs, flush logs on shutdown. [#67472](https://github.com/ClickHouse/ClickHouse/pull/67472) ([Sema Checherinda](https://github.com/CheSema)). +* Backported in [#68496](https://github.com/ClickHouse/ClickHouse/issues/68496): Improved overall performance of merges by reducing the overhead of scheduling steps of merges. [#68016](https://github.com/ClickHouse/ClickHouse/pull/68016) ([Anton Popov](https://github.com/CurtizJ)). +* Setting `optimize_functions_to_subcolumns` is enabled by default. [#68053](https://github.com/ClickHouse/ClickHouse/pull/68053) ([Anton Popov](https://github.com/CurtizJ)). + +#### Improvement +* ClickHouse server now supports new setting `max_keep_alive_requests`. For keep-alive HTTP connections to the server it works in tandem with `keep_alive_timeout` - if idle timeout not expired but there already more than `max_keep_alive_requests` requests done through the given connection - it will be closed by the server. [#61793](https://github.com/ClickHouse/ClickHouse/pull/61793) ([Nikita Taranov](https://github.com/nickitat)). +* As in the new version, SOURCES are checked based on Table Engine logic, even grant table engine is disabled by default, if a source is not granted, a prompt of table engine would popup instead, which is misleading. [#65419](https://github.com/ClickHouse/ClickHouse/pull/65419) ([jsc0218](https://github.com/jsc0218)). 
+* Added statistics type `count_min` (count-min sketches) which provide selectivity estimations for equality predicates like `col = 'val'`. Supported data types are string, date, datatime and numeric types. [#65521](https://github.com/ClickHouse/ClickHouse/pull/65521) ([JackyWoo](https://github.com/JackyWoo)). +* Do not pass logs for keeper explicitly in the image to allow overriding. [#65564](https://github.com/ClickHouse/ClickHouse/pull/65564) ([Azat Khuzhin](https://github.com/azat)). +* Use `Atomic` database by default in `clickhouse-local`. Address items 1 and 5 from [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). Closes [#44817](https://github.com/ClickHouse/ClickHouse/issues/44817). [#65860](https://github.com/ClickHouse/ClickHouse/pull/65860) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add the `rows_before_aggregation_at_least` statistic to the query response when `rows_before_aggregation` is enabled. This statistic represents the number of rows read before aggregation. In the context of a distributed query, when using the `group by` or `max` aggregation function without a `limit`, `rows_before_aggregation_at_least` can reflect the number of rows hit by the query. [#66084](https://github.com/ClickHouse/ClickHouse/pull/66084) ([morning-color](https://github.com/morning-color)). +* Introduced `use_same_password_for_base_backup` settings for `BACKUP` and `RESTORE` queries, allowing to create and restore incremental backups to/from password protected archives. [#66214](https://github.com/ClickHouse/ClickHouse/pull/66214) ([Samuele](https://github.com/sguerrini97)). +* Ignore async_load_databases for ATTACH query (previously it was possible for ATTACH to return before the tables had been attached). [#66240](https://github.com/ClickHouse/ClickHouse/pull/66240) ([Azat Khuzhin](https://github.com/azat)). +* [Replicated]MergeTreeSink has to properly cancel its delayed_chunk on `onCancel()` method. [#66279](https://github.com/ClickHouse/ClickHouse/pull/66279) ([Sema Checherinda](https://github.com/CheSema)). +* Added logs and metrics for rejected connections (where there are not enough resources). [#66410](https://github.com/ClickHouse/ClickHouse/pull/66410) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Setting `allow_experimental_analyzer` is renamed to `enable_analyzer`. The old name is preserved in a form of an alias. [#66438](https://github.com/ClickHouse/ClickHouse/pull/66438) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Support true UUID type for MongoDB engine. [#66671](https://github.com/ClickHouse/ClickHouse/pull/66671) ([Azat Khuzhin](https://github.com/azat)). +* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)). +* Add replication lag and recovery time metrics. [#66703](https://github.com/ClickHouse/ClickHouse/pull/66703) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Add S3DiskNoKeyErrors metric. [#66704](https://github.com/ClickHouse/ClickHouse/pull/66704) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). 
+* Ensure COMMENT clause works for all table engines. [#66832](https://github.com/ClickHouse/ClickHouse/pull/66832) ([Joe Lynch](https://github.com/joelynch)). +* Update the usage of error code `BAD_ARGUMENTS` and `ILLEGAL_TYPE_OF_ARGUMENT` by more accurate error codes when appropriate. [#66851](https://github.com/ClickHouse/ClickHouse/pull/66851) ([Yohann Jardin](https://github.com/yohannj)). +* Function `mapFromArrays` now accepts `Map(K, V)` as first argument, for example: `SELECT mapFromArrays(map('a', 4, 'b', 4), ['aa', 'bb'])` now works and returns `{('a',4):'aa',('b',4):'bb'}`. Also, if the 1st argument is an Array, it can now also be of type `Array(Nullable(T))` or `Array(LowCardinality(Nullable(T)))` as long as the actual array values are not `NULL`. [#67103](https://github.com/ClickHouse/ClickHouse/pull/67103) ([李扬](https://github.com/taiyang-li)). +* Read configuration for clickhouse-local from ~/.clickhouse-local. [#67135](https://github.com/ClickHouse/ClickHouse/pull/67135) ([Azat Khuzhin](https://github.com/azat)). +* Rename setting `input_format_orc_read_use_writer_time_zone` to `input_format_orc_reader_timezone` and allow the user to set the reader timezone. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)). +* Decrease level of 'Socket is not connected' error when HTTP connection immediately reset by peer after connecting, close [#34218](https://github.com/ClickHouse/ClickHouse/issues/34218). [#67177](https://github.com/ClickHouse/ClickHouse/pull/67177) ([vdimir](https://github.com/vdimir)). +* Speed up tables removal for `DROP DATABASE` query, increased the default value for `database_catalog_drop_table_concurrency` to 16. [#67228](https://github.com/ClickHouse/ClickHouse/pull/67228) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add ability to load dashboards for system.dashboards from config (once set, they overrides the default dashboards preset). [#67232](https://github.com/ClickHouse/ClickHouse/pull/67232) ([Azat Khuzhin](https://github.com/azat)). +* The window functions in SQL are traditionally in snake case. ClickHouse uses camelCase, so new aliases `denseRank()` and `percentRank()` have been created. These new functions can be called the exact same as the original `dense_rank()` and `percent_rank()` functions. Both snake case and camelCase syntaxes remain usable. A new test for each of the functions has been added as well. This closes [#67042](https://github.com/ClickHouse/ClickHouse/issues/67042) . [#67334](https://github.com/ClickHouse/ClickHouse/pull/67334) ([Peter Nguyen](https://github.com/petern48)). +* Autodetect configuration file format if is not .xml, .yml or .yaml. If the file begins with < it might be XML, otherwise it might be YAML. Non regular file just parse as XML such as PIPE: /dev/fd/X. [#67391](https://github.com/ClickHouse/ClickHouse/pull/67391) ([sakulali](https://github.com/sakulali)). +* Functions `formatDateTime` and `formatDateTimeInJodaSyntax` now treat their format parameter as optional. If it is not specified, format strings `%Y-%m-%d %H:%i:%s` and `yyyy-MM-dd HH:mm:ss` are assumed. Example: `SELECT parseDateTime('2021-01-04 23:12:34')` now returns DateTime value `2021-01-04 23:12:34` (previously, this threw an exception). [#67399](https://github.com/ClickHouse/ClickHouse/pull/67399) ([Robert Schulze](https://github.com/rschu1ze)). +* Automatically retry Keeper requests in KeeperMap if they happen because of timeout or connection loss. 
[#67448](https://github.com/ClickHouse/ClickHouse/pull/67448) ([Antonio Andelic](https://github.com/antonio2368)). +* Rework usage of custom table's disks. [#67684](https://github.com/ClickHouse/ClickHouse/pull/67684) ([Sema Checherinda](https://github.com/CheSema)). +* Various improvements in the advanced dashboard. This closes [#67697](https://github.com/ClickHouse/ClickHouse/issues/67697). This closes [#63407](https://github.com/ClickHouse/ClickHouse/issues/63407). This closes [#51129](https://github.com/ClickHouse/ClickHouse/issues/51129). This closes [#61204](https://github.com/ClickHouse/ClickHouse/issues/61204). [#67701](https://github.com/ClickHouse/ClickHouse/pull/67701) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Avoid allocate too much capacity for array column while writing orc & some minor refactors to make code cleaner. Performance speeds up 15% for array column. [#67879](https://github.com/ClickHouse/ClickHouse/pull/67879) ([李扬](https://github.com/taiyang-li)). +* Support OPTIMIZE query on Join table engine to reduce Join tables memory footprint. [#67883](https://github.com/ClickHouse/ClickHouse/pull/67883) ([Duc Canh Le](https://github.com/canhld94)). +* Add replication lag and recovery time metrics. [#67913](https://github.com/ClickHouse/ClickHouse/pull/67913) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Add '-no-pie' to aarch64 Linux builds to allow proper introspection and symbolizing of stacktraces after a ClickHouse restart. [#67916](https://github.com/ClickHouse/ClickHouse/pull/67916) ([filimonov](https://github.com/filimonov)). +* Backported in [#68481](https://github.com/ClickHouse/ClickHouse/issues/68481): Added profile events for merges and mutations for better introspection. [#68015](https://github.com/ClickHouse/ClickHouse/pull/68015) ([Anton Popov](https://github.com/CurtizJ)). +* Fix settings/current_database in system.processes for async BACKUP/RESTORE. [#68163](https://github.com/ClickHouse/ClickHouse/pull/68163) ([Azat Khuzhin](https://github.com/azat)). +* Remove unnecessary logs for MergeTree that doesn't support replication. [#68238](https://github.com/ClickHouse/ClickHouse/pull/68238) ([Daniil Ivanik](https://github.com/divanik)). +* Backported in [#68430](https://github.com/ClickHouse/ClickHouse/issues/68430): Improve schema inference of date times. Now DateTime64 used only when date time has fractional part, otherwise regular DateTime is used. Inference of Date/DateTime is more strict now, especially when `date_time_input_format='best_effort'` to avoid inferring date times from strings in corner cases. [#68382](https://github.com/ClickHouse/ClickHouse/pull/68382) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)). +* Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)). +* Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)). +* Fix formatting of query with aliased JOIN ON expression, e.g. `... JOIN t2 ON (x = y) AS e ORDER BY x` should be formatted as `... JOIN t2 ON ((x = y) AS e) ORDER BY x`. 
[#66312](https://github.com/ClickHouse/ClickHouse/pull/66312) ([vdimir](https://github.com/vdimir)). +* Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible runtime error while converting Array field with nulls to Array(Variant). [#66727](https://github.com/ClickHouse/ClickHouse/pull/66727) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)). +* Fix creating KeeperMap table after an incomplete drop. [#66865](https://github.com/ClickHouse/ClickHouse/pull/66865) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix broken part error while restoring to a `s3_plain_rewritable` disk. [#66881](https://github.com/ClickHouse/ClickHouse/pull/66881) ([Vitaly Baranov](https://github.com/vitlibar)). +* In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Fix invalid format detection in schema inference that could lead to logical error Format {} doesn't support schema inference. [#66899](https://github.com/ClickHouse/ClickHouse/pull/66899) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)). +* Forbid create as select even when database_replicated_allow_heavy_create is set. It was unconditionally forbidden in 23.12 and accidentally allowed under the setting in unreleased 24.7. [#66980](https://github.com/ClickHouse/ClickHouse/pull/66980) ([vdimir](https://github.com/vdimir)). +* Reading from the `numbers` could wrongly throw an exception when the `max_rows_to_read` limit was set. This closes [#66992](https://github.com/ClickHouse/ClickHouse/issues/66992). [#66996](https://github.com/ClickHouse/ClickHouse/pull/66996) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add proper type conversion to lagInFrame and leadInFrame window functions - fixes msan test. [#67091](https://github.com/ClickHouse/ClickHouse/pull/67091) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Use a separate client context in `clickhouse-local`. [#67133](https://github.com/ClickHouse/ClickHouse/pull/67133) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distriburted` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Correct behavior of `ORDER BY all` with disabled `enable_order_by_all` and parallel replicas (distributed queries as well). 
[#67153](https://github.com/ClickHouse/ClickHouse/pull/67153) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix wrong usage of input_format_max_bytes_to_read_for_schema_inference in schema cache. [#67157](https://github.com/ClickHouse/ClickHouse/pull/67157) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix the memory leak for count distinct, when exception issued during group by single nullable key. [#67171](https://github.com/ClickHouse/ClickHouse/pull/67171) ([Jet He](https://github.com/compasses)). +* This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)). +* Fix error `Conversion from AggregateFunction(name, Type) to AggregateFunction(name, Nullable(Type)) is not supported`. The bug was caused by the `optimize_rewrite_aggregate_function_with_if` optimization. Fixes [#67112](https://github.com/ClickHouse/ClickHouse/issues/67112). [#67229](https://github.com/ClickHouse/ClickHouse/pull/67229) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix hung query when using empty tuple as lhs of function IN. [#67295](https://github.com/ClickHouse/ClickHouse/pull/67295) ([Duc Canh Le](https://github.com/canhld94)). +* Fix crash of `uniq` and `uniqTheta ` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)). +* It was possible to create a very deep nested JSON data that triggered stack overflow while skipping unknown fields. This closes [#67292](https://github.com/ClickHouse/ClickHouse/issues/67292). [#67324](https://github.com/ClickHouse/ClickHouse/pull/67324) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix attaching ReplicatedMergeTree table after exception during startup. [#67360](https://github.com/ClickHouse/ClickHouse/pull/67360) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix segfault caused by incorrectly detaching from thread group in `Aggregator`. [#67385](https://github.com/ClickHouse/ClickHouse/pull/67385) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix one more case when a non-deterministic function is specified in PK. [#67395](https://github.com/ClickHouse/ClickHouse/pull/67395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fixed `bloom_filter` index breaking queries with mildly weird conditions like `(k=2)=(k=2)` or `has([1,2,3], k)`. [#67423](https://github.com/ClickHouse/ClickHouse/pull/67423) ([Michael Kolupaev](https://github.com/al13n321)). +* Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix wait for tasks in ~WriteBufferFromS3 in case WriteBuffer was cancelled. [#67459](https://github.com/ClickHouse/ClickHouse/pull/67459) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Protect temporary part directories from removing during RESTORE. [#67491](https://github.com/ClickHouse/ClickHouse/pull/67491) ([Vitaly Baranov](https://github.com/vitlibar)). 
+* Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)). +* Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Fix `Logical error: Expected the argument №N of type T to have X rows, but it has 0`. The error could happen in a remote query with constant expression in `GROUP BY` (with a new analyzer). [#67536](https://github.com/ClickHouse/ClickHouse/pull/67536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix join on tuple with NULLs: Some queries with the new analyzer and `NULL` inside the tuple in the `JOIN ON` section returned incorrect results. [#67538](https://github.com/ClickHouse/ClickHouse/pull/67538) ([vdimir](https://github.com/vdimir)). +* Fix redundant reschedule of FileCache::freeSpaceRatioKeepingThreadFunc() in case of full non-evictable cache. [#67540](https://github.com/ClickHouse/ClickHouse/pull/67540) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix for function `toStartOfWeek` which returned the wrong result with a small `DateTime64` value. [#67558](https://github.com/ClickHouse/ClickHouse/pull/67558) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix `Logical error: 'file_offset_of_buffer_end <= read_until_position'` in filesystem cache. Closes [#57508](https://github.com/ClickHouse/ClickHouse/issues/57508). [#67623](https://github.com/ClickHouse/ClickHouse/pull/67623) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fixes [#62282](https://github.com/ClickHouse/ClickHouse/issues/62282). Removed the call to `convertFieldToString()` and added datatype specific serialization code. Parameterized view substitution was broken for multiple datatypes when parameter value was a function or expression returning datatype instance. [#67654](https://github.com/ClickHouse/ClickHouse/pull/67654) ([Shankar](https://github.com/shiyer7474)). +* Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is considered and now window functions without window frame definition in sql can be put into different `WindowTransfomer`s properly. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)). +* Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix possible logical error "Unexpected return type from if" with experimental Variant type and enabled setting `use_variant_as_common_type ` in function if with Tuples and Maps. [#67687](https://github.com/ClickHouse/ClickHouse/pull/67687) ([Kruglov Pavel](https://github.com/Avogar)). +* Due to a bug in Linux Kernel, a query can hung in `TimerDescriptor::drain`. 
This closes [#37686](https://github.com/ClickHouse/ClickHouse/issues/37686). [#67702](https://github.com/ClickHouse/ClickHouse/pull/67702) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix completion of `RESTORE ON CLUSTER` command. [#67720](https://github.com/ClickHouse/ClickHouse/pull/67720) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix dictionary hang in case of CANNOT_SCHEDULE_TASK while loading. [#67751](https://github.com/ClickHouse/ClickHouse/pull/67751) ([Azat Khuzhin](https://github.com/azat)). +* Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Queries like `SELECT count() FROM t WHERE cast(c = 1 or c = 9999 AS Bool) SETTINGS use_skip_indexes=1` with bloom filter indexes on `c` now work correctly. [#67781](https://github.com/ClickHouse/ClickHouse/pull/67781) ([jsc0218](https://github.com/jsc0218)). +* Fix wrong aggregation result in some queries with aggregation without keys and filter, close [#67419](https://github.com/ClickHouse/ClickHouse/issues/67419). [#67804](https://github.com/ClickHouse/ClickHouse/pull/67804) ([vdimir](https://github.com/vdimir)). +* Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix DateTime64 parsing after constant folding in distributed queries, close [#66773](https://github.com/ClickHouse/ClickHouse/issues/66773). [#67920](https://github.com/ClickHouse/ClickHouse/pull/67920) ([vdimir](https://github.com/vdimir)). +* Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)). +* Now ClickHouse doesn't consider part as broken if projection doesn't exist on disk but exists in `checksums.txt`. [#68003](https://github.com/ClickHouse/ClickHouse/pull/68003) ([alesapin](https://github.com/alesapin)). +* Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)). +* Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)). +* Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)). 
+* Attempt to fix `Block structure mismatch in AggregatingStep stream: different types` for aggregate projection optimization. [#68107](https://github.com/ClickHouse/ClickHouse/pull/68107) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#68343](https://github.com/ClickHouse/ClickHouse/issues/68343): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#68400](https://github.com/ClickHouse/ClickHouse/issues/68400): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)). + +#### Build/Testing/Packaging Improvement +* Improved `test_storage_s3` tests: increased `s3_max_single_read_retries` for reads from an "unstable" s3 source and allowed all tests to run multiple times in a row. [#66896](https://github.com/ClickHouse/ClickHouse/pull/66896) ([Ilya Yatsishin](https://github.com/qoega)). +* Integration tests flaky check will now run each test case multiple times to find more issues in tests and make them more reliable. It uses the `pytest-repeat` library to run a test case multiple times in the same environment. It is important to clean up tables and other entities at the end of a test case for it to pass. Repeat works much faster than several pytest runs as it starts the necessary containers only once. [#66986](https://github.com/ClickHouse/ClickHouse/pull/66986) ([Ilya Yatsishin](https://github.com/qoega)). +* Allow using CLion with ClickHouse. In previous versions, CLion froze for a minute on every keypress. This closes [#66994](https://github.com/ClickHouse/ClickHouse/issues/66994). [#66995](https://github.com/ClickHouse/ClickHouse/pull/66995) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Getauxval: avoid a crash under sanitizer re-exec due to high ASLR entropy. [#67081](https://github.com/ClickHouse/ClickHouse/pull/67081) ([Raúl Marín](https://github.com/Algunenano)). +* Some parts of the client code are extracted to a single file, and the highest possible optimization level is applied to them even for debug builds. This closes: [#65745](https://github.com/ClickHouse/ClickHouse/issues/65745). [#67215](https://github.com/ClickHouse/ClickHouse/pull/67215) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). + +#### NO CL CATEGORY + +* Backported in [#68416](https://github.com/ClickHouse/ClickHouse/issues/68416):. [#68386](https://github.com/ClickHouse/ClickHouse/pull/68386) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Fix for 992 and friends"'. [#66993](https://github.com/ClickHouse/ClickHouse/pull/66993) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Revert "Fix for 992 and friends""'. [#67029](https://github.com/ClickHouse/ClickHouse/pull/67029) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "FuzzQuery table function"'. [#67040](https://github.com/ClickHouse/ClickHouse/pull/67040) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Enable `compile_expressions` by default."'. [#67299](https://github.com/ClickHouse/ClickHouse/pull/67299) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Slightly better calculation of primary index"'. [#67392](https://github.com/ClickHouse/ClickHouse/pull/67392) ([alesapin](https://github.com/alesapin)). 
+* NO CL ENTRY: 'Revert "Add settings to replace external engines to Null during create"'. [#67507](https://github.com/ClickHouse/ClickHouse/pull/67507) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Revert "Add settings to replace external engines to Null during create""'. [#67511](https://github.com/ClickHouse/ClickHouse/pull/67511) ([Ilya Yatsishin](https://github.com/qoega)). +* NO CL ENTRY: 'Revert "Add replication lag and recovery time metrics"'. [#67731](https://github.com/ClickHouse/ClickHouse/pull/67731) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Revert "Slightly better calculation of primary index""'. [#67846](https://github.com/ClickHouse/ClickHouse/pull/67846) ([Anton Popov](https://github.com/CurtizJ)). +* NO CL ENTRY: 'Revert "CI: Strict job timeout 1.5h for tests, 2h for builds"'. [#67986](https://github.com/ClickHouse/ClickHouse/pull/67986) ([Max K.](https://github.com/maxknv)). +* NO CL ENTRY: 'Revert "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing"'. [#68014](https://github.com/ClickHouse/ClickHouse/pull/68014) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* NO CL ENTRY: 'Revert "Use `Atomic` database by default in `clickhouse-local`"'. [#68023](https://github.com/ClickHouse/ClickHouse/pull/68023) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Refactor tests for (experimental) statistics"'. [#68156](https://github.com/ClickHouse/ClickHouse/pull/68156) ([Alexander Tokmakov](https://github.com/tavplubix)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* CI: enable libfuzzer (fixing build and docker). [#61908](https://github.com/ClickHouse/ClickHouse/pull/61908) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Initial implementation of vector similarity index. [#63675](https://github.com/ClickHouse/ClickHouse/pull/63675) ([Robert Schulze](https://github.com/rschu1ze)). +* Update zlib-ng from 2.0.2 to 2.1.7. [#64489](https://github.com/ClickHouse/ClickHouse/pull/64489) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix 02444_async_broken_outdated_part_loading flakiness. [#64956](https://github.com/ClickHouse/ClickHouse/pull/64956) ([Azat Khuzhin](https://github.com/azat)). +* attach_gdb.lib: print more information before all stacks. [#65253](https://github.com/ClickHouse/ClickHouse/pull/65253) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix some perf tests. [#65320](https://github.com/ClickHouse/ClickHouse/pull/65320) ([Nikita Taranov](https://github.com/nickitat)). +* Remove ActionsDAGPtr whenever it is possible. [#65414](https://github.com/ClickHouse/ClickHouse/pull/65414) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Scheduler queue throughput introspection, Fix CPU indication in client. [#65654](https://github.com/ClickHouse/ClickHouse/pull/65654) ([Sergei Trifonov](https://github.com/serxa)). +* Increase timeout in 02122_join_group_by_timeout for tsan build. [#65976](https://github.com/ClickHouse/ClickHouse/pull/65976) ([vdimir](https://github.com/vdimir)). +* Remove default values for certificateFile/privateKeyFile/dhParamsFile in keeper config (to avoid annoying errors in logs). [#65978](https://github.com/ClickHouse/ClickHouse/pull/65978) ([Azat Khuzhin](https://github.com/azat)). +* Update version_date.tsv and changelogs after v24.3.5.46-lts. [#66054](https://github.com/ClickHouse/ClickHouse/pull/66054) ([robot-clickhouse](https://github.com/robot-clickhouse)). 
+* Fix flaky `02814_currentDatabase_for_table_functions`. [#66111](https://github.com/ClickHouse/ClickHouse/pull/66111) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix possible data race in StorageKafka with statistics_interval_ms>0. [#66311](https://github.com/ClickHouse/ClickHouse/pull/66311) ([Azat Khuzhin](https://github.com/azat)). +* Avoid unneeded calculation in SeriesPeriodDetect. [#66320](https://github.com/ClickHouse/ClickHouse/pull/66320) ([Ruihang Xia](https://github.com/waynexia)). +* It aims to complete [#58630](https://github.com/ClickHouse/ClickHouse/issues/58630). This is made possible by [#60463](https://github.com/ClickHouse/ClickHouse/issues/60463), [#61459](https://github.com/ClickHouse/ClickHouse/issues/61459) and [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082). [#66443](https://github.com/ClickHouse/ClickHouse/pull/66443) ([Amos Bird](https://github.com/amosbird)). +* Allow running a query instantly in Play. [#66457](https://github.com/ClickHouse/ClickHouse/pull/66457) ([Aleksandr Musorin](https://github.com/AVMusorin)). +* Bump ICU from v70 to v75. [#66474](https://github.com/ClickHouse/ClickHouse/pull/66474) ([Robert Schulze](https://github.com/rschu1ze)). +* Bump RocksDB from v6.29.5 to v7.10.2. [#66475](https://github.com/ClickHouse/ClickHouse/pull/66475) ([Robert Schulze](https://github.com/rschu1ze)). +* Bump RocksDB from v7.10.2 to v8.9.1. [#66479](https://github.com/ClickHouse/ClickHouse/pull/66479) ([Robert Schulze](https://github.com/rschu1ze)). +* The error code for this function should not be "NOT_ALLOWED" since it is simply an invalid query; "BAD_QUERY_PARAMETER" is a more reasonable error code for this. [#66491](https://github.com/ClickHouse/ClickHouse/pull/66491) ([Ali](https://github.com/xogoodnow)). +* Update gdb to 15.1 (by compiling from sources). [#66494](https://github.com/ClickHouse/ClickHouse/pull/66494) ([Azat Khuzhin](https://github.com/azat)). +* Ensure that llvm-symbolizer is used for symbolizing sanitizer reports. [#66495](https://github.com/ClickHouse/ClickHouse/pull/66495) ([Azat Khuzhin](https://github.com/azat)). +* Remove unused local variables. [#66503](https://github.com/ClickHouse/ClickHouse/pull/66503) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This will solve a lot of problems with inconsistent formatting. And it opens the path for [#65753](https://github.com/ClickHouse/ClickHouse/issues/65753). This closes [#66807](https://github.com/ClickHouse/ClickHouse/issues/66807). This closes [#61611](https://github.com/ClickHouse/ClickHouse/issues/61611). This closes [#61711](https://github.com/ClickHouse/ClickHouse/issues/61711). This closes [#67445](https://github.com/ClickHouse/ClickHouse/issues/67445). [#66506](https://github.com/ClickHouse/ClickHouse/pull/66506) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Rename Context::getSettings() to Context::getSettingsCopy(). [#66528](https://github.com/ClickHouse/ClickHouse/pull/66528) ([Raúl Marín](https://github.com/Algunenano)). +* Uninteresting change: introducing `ClientApplicationBase`. [#66549](https://github.com/ClickHouse/ClickHouse/pull/66549) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Respond to parallel replicas protocol requests with priority on initiator. [#66618](https://github.com/ClickHouse/ClickHouse/pull/66618) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix bad code: it was catching exceptions. 
[#66628](https://github.com/ClickHouse/ClickHouse/pull/66628) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Dump all memory stats in CgroupsMemoryUsageObserver on hitting the limit. [#66732](https://github.com/ClickHouse/ClickHouse/pull/66732) ([Nikita Taranov](https://github.com/nickitat)). +* Save writer thread id in shared mutex for debugging. [#66745](https://github.com/ClickHouse/ClickHouse/pull/66745) ([Alexander Gololobov](https://github.com/davenger)). +* Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)). +* Looks like it runs too many mutations sometimes and fails to process them within the timeout. So if a query waits for mutations - the test fails. [#66785](https://github.com/ClickHouse/ClickHouse/pull/66785) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better diagnostics in functional tests. [#66790](https://github.com/ClickHouse/ClickHouse/pull/66790) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `01042_system_reload_dictionary_reloads_completely`. [#66811](https://github.com/ClickHouse/ClickHouse/pull/66811) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Something is strange with the test about refreshable materialized views. [#66816](https://github.com/ClickHouse/ClickHouse/pull/66816) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Randomize `trace_profile_events`. [#66821](https://github.com/ClickHouse/ClickHouse/pull/66821) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Stop ignoring SIGSEGV in GDB. [#66847](https://github.com/ClickHouse/ClickHouse/pull/66847) ([Antonio Andelic](https://github.com/antonio2368)). +* Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)). +* When executing queries with parallel replicas that involve only a subset of nodes within a shard, the current behavior is that if all participating replicas are unavailable, the query completes without any errors but returns no results. Referencing issue [#65467](https://github.com/ClickHouse/ClickHouse/issues/65467), this pull request addresses the issue where only a portion of the nodes in a shard are participating in the execution. [#66880](https://github.com/ClickHouse/ClickHouse/pull/66880) ([zoomxi](https://github.com/zoomxi)). +* Speed up stateful tests setup. [#66886](https://github.com/ClickHouse/ClickHouse/pull/66886) ([Raúl Marín](https://github.com/Algunenano)). +* Functions [h-r]*: Iterate over input_rows_count where appropriate. [#66897](https://github.com/ClickHouse/ClickHouse/pull/66897) ([Robert Schulze](https://github.com/rschu1ze)). +* Stateless tests: Change status of failed tests in case of server crash and add no-parallel to high-load tests. [#66901](https://github.com/ClickHouse/ClickHouse/pull/66901) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix performance test about the generateRandom table function, supposedly. [#66906](https://github.com/ClickHouse/ClickHouse/pull/66906) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad tests `share_big_sets`, CC @davenger. [#66908](https://github.com/ClickHouse/ClickHouse/pull/66908) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Speed up mutations for non-replicated MergeTree a bit. 
[#66909](https://github.com/ClickHouse/ClickHouse/pull/66909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Speed up mutations for non-replicated MergeTree significantly. [#66911](https://github.com/ClickHouse/ClickHouse/pull/66911) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix views over distributed tables with Analyzer. [#66912](https://github.com/ClickHouse/ClickHouse/pull/66912) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* [CI fest] Try to fix `test_broken_projections/test.py::test_broken_ignored_replicated`. [#66915](https://github.com/ClickHouse/ClickHouse/pull/66915) ([Andrey Zvonov](https://github.com/zvonand)). +* Decrease rate limit in `01923_network_receive_time_metric_insert`. [#66924](https://github.com/ClickHouse/ClickHouse/pull/66924) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Grouparrayintersect: fix serialization bug. [#66928](https://github.com/ClickHouse/ClickHouse/pull/66928) ([Raúl Marín](https://github.com/Algunenano)). +* Update version after release branch. [#66929](https://github.com/ClickHouse/ClickHouse/pull/66929) ([Raúl Marín](https://github.com/Algunenano)). +* Un-flake test_runtime_configurable_cache_size. [#66934](https://github.com/ClickHouse/ClickHouse/pull/66934) ([Robert Schulze](https://github.com/rschu1ze)). +* fix unit tests ResolvePoolTest with timeouts. [#66953](https://github.com/ClickHouse/ClickHouse/pull/66953) ([Sema Checherinda](https://github.com/CheSema)). +* Split slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)). +* CI: Fixes docker server build for release branches. [#66955](https://github.com/ClickHouse/ClickHouse/pull/66955) ([Max K.](https://github.com/maxknv)). +* Addressing issue [#64936](https://github.com/ClickHouse/ClickHouse/issues/64936). [#66973](https://github.com/ClickHouse/ClickHouse/pull/66973) ([alesapin](https://github.com/alesapin)). +* Add initial 24.7 changelog. [#66976](https://github.com/ClickHouse/ClickHouse/pull/66976) ([Raúl Marín](https://github.com/Algunenano)). +* Apply libunwind fix. [#66977](https://github.com/ClickHouse/ClickHouse/pull/66977) ([Michael Kolupaev](https://github.com/al13n321)). +* CI: Add logs for debugging. [#66979](https://github.com/ClickHouse/ClickHouse/pull/66979) ([Max K.](https://github.com/maxknv)). +* [CI Fest] Split dynamic tests and rewrite them from sh to sql to avoid timeouts. [#66981](https://github.com/ClickHouse/ClickHouse/pull/66981) ([Kruglov Pavel](https://github.com/Avogar)). +* Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)). +* [CI Fest] Fix use-of-uninitialized-value in JSONExtract* numeric functions. [#66984](https://github.com/ClickHouse/ClickHouse/pull/66984) ([Kruglov Pavel](https://github.com/Avogar)). +* It should fix SQLancer checks, but for some reason we stopped invalidating cache for docker builds and fix was not published to our CI for a while. [#66987](https://github.com/ClickHouse/ClickHouse/pull/66987) ([Ilya Yatsishin](https://github.com/qoega)). +* Fixes [#66941](https://github.com/ClickHouse/ClickHouse/issues/66941). [#66991](https://github.com/ClickHouse/ClickHouse/pull/66991) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Remove the support for Kerberized HDFS. 
[#66998](https://github.com/ClickHouse/ClickHouse/pull/66998) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Fix for workflow results parsing. [#67000](https://github.com/ClickHouse/ClickHouse/pull/67000) ([Max K.](https://github.com/maxknv)). +* Fix flaky `01454_storagememory_data_race_challenge`. [#67003](https://github.com/ClickHouse/ClickHouse/pull/67003) ([Antonio Andelic](https://github.com/antonio2368)). +* CI: Jepsen Workflow fix for skipped builds and observability. [#67004](https://github.com/ClickHouse/ClickHouse/pull/67004) ([Max K.](https://github.com/maxknv)). +* Bugfix: AttachedTable counting was not symmetric; also add some test logs…. [#67007](https://github.com/ClickHouse/ClickHouse/pull/67007) ([Xu Jia](https://github.com/XuJia0210)). +* CI: Automerge when required and non-required checks completed. [#67008](https://github.com/ClickHouse/ClickHouse/pull/67008) ([Max K.](https://github.com/maxknv)). +* Fix test `very_long_arrays`. [#67009](https://github.com/ClickHouse/ClickHouse/pull/67009) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Try to fix exception logging in destructors of static objects. [#67016](https://github.com/ClickHouse/ClickHouse/pull/67016) ([Antonio Andelic](https://github.com/antonio2368)). +* [Green CI] Fix test test_storage_azure_blob_storage. [#67019](https://github.com/ClickHouse/ClickHouse/pull/67019) ([Daniil Ivanik](https://github.com/divanik)). +* Integration tests: fix flaky tests `test_backup_restore_on_cluster/test_concurrency.py` & `test_manipulate_statistics/test.py`. [#67027](https://github.com/ClickHouse/ClickHouse/pull/67027) ([Nikita Fomichev](https://github.com/fm4v)). +* [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)). +* Test for alter select with parallel replicas. [#67041](https://github.com/ClickHouse/ClickHouse/pull/67041) ([Igor Nikonov](https://github.com/devcrafter)). +* Split query into multiple queries to consume less memory at once + use less data. Fixes [#67034](https://github.com/ClickHouse/ClickHouse/issues/67034). [#67044](https://github.com/ClickHouse/ClickHouse/pull/67044) ([alesapin](https://github.com/alesapin)). +* Disable setting `optimize_functions_to_subcolumns`. [#67046](https://github.com/ClickHouse/ClickHouse/pull/67046) ([Anton Popov](https://github.com/CurtizJ)). +* Increase max allocation size for sanitizers. [#67049](https://github.com/ClickHouse/ClickHouse/pull/67049) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Rewrite 01171 test. [#67054](https://github.com/ClickHouse/ClickHouse/pull/67054) ([Sema Checherinda](https://github.com/CheSema)). +* Add `**` to `hdfs` docs, add test for `**` in `hdfs`. [#67064](https://github.com/ClickHouse/ClickHouse/pull/67064) ([Andrey Zvonov](https://github.com/zvonand)). 
+* Very sad failure:
```
2024.07.24 13:28:45.517777 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} executeQuery: (from 172.16.11.1:55890) OPTIMIZE TABLE replicated_mt FINAL (stage: Complete)
2024.07.24 13:28:45.525945 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (ReplicatedMergeTreeQueue): Waiting for 4 entries to be processed: queue-0000000004, queue-0000000002, queue-0000000001, queue-0000000000
2024.07.24 13:29:15.528024 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e) (MergerMutator): Selected 3 parts from all_0_0_0 to all_2_2_0
2024.07.24 13:29:15.530736 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Created log entry /clickhouse/tables/replicated_mt/log/log-0000000004 for merge all_0_2_1
2024.07.24 13:29:15.530873 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to process log entry
2024.07.24 13:29:15.530919 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for node1 to pull log-0000000004 to queue
2024.07.24 13:29:15.534286 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Looking for node corresponding to log-0000000004 in node1 queue
2024.07.24 13:29:15.534793 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} default.replicated_mt (6581a6fb-8458-466d-8350-89951eb1ac8e): Waiting for queue-0000000005 to disappear from node1 queue
2024.07.24 13:29:15.585533 [ 10 ] {08745bf9-4bc1-4946-b9a8-c03d82ec55dc} TCPHandler: Processed in 30.067804125 sec.
```
[#67067](https://github.com/ClickHouse/ClickHouse/pull/67067) ([alesapin](https://github.com/alesapin)). +* Fix flaky `test_seekable_formats_url` and `test_seekable_formats` S3 storage tests. [#67070](https://github.com/ClickHouse/ClickHouse/pull/67070) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* CI: Docker server build fix for new release workflow. [#67075](https://github.com/ClickHouse/ClickHouse/pull/67075) ([Max K.](https://github.com/maxknv)). +* Fix 2680 flakiness. [#67078](https://github.com/ClickHouse/ClickHouse/pull/67078) ([jsc0218](https://github.com/jsc0218)). +* [CI Fest] Fix flaky 02447_drop_replica test. [#67085](https://github.com/ClickHouse/ClickHouse/pull/67085) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fixes [#67030](https://github.com/ClickHouse/ClickHouse/issues/67030). [#67086](https://github.com/ClickHouse/ClickHouse/pull/67086) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Increase timeout for curl in tests. [#67089](https://github.com/ClickHouse/ClickHouse/pull/67089) ([Anton Popov](https://github.com/CurtizJ)). +* Try calculating available memory if ClickHouse is bound to a subset of NUMA nodes. [#67098](https://github.com/ClickHouse/ClickHouse/pull/67098) ([Antonio Andelic](https://github.com/antonio2368)). +* A more precise way of tracking flushing time in 01246_buffer_flush. [#67099](https://github.com/ClickHouse/ClickHouse/pull/67099) ([Azat Khuzhin](https://github.com/azat)). +* Do not fail CheckReadyForMerge on failed Tests_2 (non-required jobs). Do not skip the CiBuddy report step on failures. [#67101](https://github.com/ClickHouse/ClickHouse/pull/67101) ([Max K.](https://github.com/maxknv)). 
+* Tststs_1 - for all required checks Tststs_2 - for all non-required checks (normal mode) Tststs_2_ww - for all non-required checks (woolenwolfdog mode). [#67104](https://github.com/ClickHouse/ClickHouse/pull/67104) ([Max K.](https://github.com/maxknv)). +* Functions [s-t]*: Iterate over input_rows_count where appropriate. [#67105](https://github.com/ClickHouse/ClickHouse/pull/67105) ([Robert Schulze](https://github.com/rschu1ze)). +* Reintroduce 02805_distributed_queries_timeouts. [#67106](https://github.com/ClickHouse/ClickHouse/pull/67106) ([Azat Khuzhin](https://github.com/azat)). +* Added some tests in relation with [#54881](https://github.com/ClickHouse/ClickHouse/issues/54881). [#67110](https://github.com/ClickHouse/ClickHouse/pull/67110) ([max-vostrikov](https://github.com/max-vostrikov)). +* Reintroduce 03002_part_log_rmt_fetch_*_error tests without flakiness and less time. [#67113](https://github.com/ClickHouse/ClickHouse/pull/67113) ([Azat Khuzhin](https://github.com/azat)). +* Improve tag matching in backport scripts. [#67118](https://github.com/ClickHouse/ClickHouse/pull/67118) ([Raúl Marín](https://github.com/Algunenano)). +* Fixes [#67111](https://github.com/ClickHouse/ClickHouse/issues/67111). [#67121](https://github.com/ClickHouse/ClickHouse/pull/67121) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Increase lock_acquire_timeout_for_background_operations setting in dynamic merges tests. [#67126](https://github.com/ClickHouse/ClickHouse/pull/67126) ([Kruglov Pavel](https://github.com/Avogar)). +* Attempt to fix flakiness of some window view tests. [#67130](https://github.com/ClickHouse/ClickHouse/pull/67130) ([Robert Schulze](https://github.com/rschu1ze)). +* Update assert in cache. [#67138](https://github.com/ClickHouse/ClickHouse/pull/67138) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix test `00673_subquery_prepared_set_performance`. [#67141](https://github.com/ClickHouse/ClickHouse/pull/67141) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixes [#67047](https://github.com/ClickHouse/ClickHouse/issues/67047). [#67142](https://github.com/ClickHouse/ClickHouse/pull/67142) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Lower max allocation size in query fuzzer. [#67145](https://github.com/ClickHouse/ClickHouse/pull/67145) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixes [#66966](https://github.com/ClickHouse/ClickHouse/issues/66966). [#67147](https://github.com/ClickHouse/ClickHouse/pull/67147) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Try fix `02481_async_insert_race_long` flakiness. [#67148](https://github.com/ClickHouse/ClickHouse/pull/67148) ([Julia Kartseva](https://github.com/jkartseva)). +* Rename (unreleased) bad setting. [#67149](https://github.com/ClickHouse/ClickHouse/pull/67149) ([Raúl Marín](https://github.com/Algunenano)). +* Uncomment accidentally commented out code in QueryProfiler. [#67152](https://github.com/ClickHouse/ClickHouse/pull/67152) ([Michael Kolupaev](https://github.com/al13n321)). +* Try to fix 2572. [#67158](https://github.com/ClickHouse/ClickHouse/pull/67158) ([jsc0218](https://github.com/jsc0218)). +* Fix benign data race in ZooKeeper. [#67164](https://github.com/ClickHouse/ClickHouse/pull/67164) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove too long unit test. [#67168](https://github.com/ClickHouse/ClickHouse/pull/67168) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `00705_drop_create_merge_tree`. 
[#67170](https://github.com/ClickHouse/ClickHouse/pull/67170) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix stacktrace cache. [#67173](https://github.com/ClickHouse/ClickHouse/pull/67173) ([Antonio Andelic](https://github.com/antonio2368)). +* Fixes [#67151](https://github.com/ClickHouse/ClickHouse/issues/67151). [#67174](https://github.com/ClickHouse/ClickHouse/pull/67174) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Make 02908_many_requests_to_system_replicas less stressful. [#67176](https://github.com/ClickHouse/ClickHouse/pull/67176) ([Alexander Gololobov](https://github.com/davenger)). +* Reduce max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)). +* Do not count AttachedTable for tables in information schema databases. [#67187](https://github.com/ClickHouse/ClickHouse/pull/67187) ([Sergei Trifonov](https://github.com/serxa)). +* Verbose output for 03203_client_benchmark_options. [#67188](https://github.com/ClickHouse/ClickHouse/pull/67188) ([vdimir](https://github.com/vdimir)). +* Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)). +* Fix flaky `test_pkill_query_log` (tsan). [#67223](https://github.com/ClickHouse/ClickHouse/pull/67223) ([Sergei Trifonov](https://github.com/serxa)). +* Remove integration test `test_broken_projections_in_backups_1`. [#67231](https://github.com/ClickHouse/ClickHouse/pull/67231) ([Vitaly Baranov](https://github.com/vitlibar)). +* Debug logging for [#67002](https://github.com/ClickHouse/ClickHouse/issues/67002). [#67233](https://github.com/ClickHouse/ClickHouse/pull/67233) ([Nikita Taranov](https://github.com/nickitat)). +* Fix oss-fuzz build. [#67235](https://github.com/ClickHouse/ClickHouse/pull/67235) ([Nikita Taranov](https://github.com/nickitat)). +* Fix flaky 00180_no_seek_avoiding_when_reading_from_cache. [#67236](https://github.com/ClickHouse/ClickHouse/pull/67236) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* English. [#67258](https://github.com/ClickHouse/ClickHouse/pull/67258) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove strange code. [#67260](https://github.com/ClickHouse/ClickHouse/pull/67260) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix MSan report in DatabaseReplicated. [#67262](https://github.com/ClickHouse/ClickHouse/pull/67262) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `02310_clickhouse_local_INSERT_progress_profile_events`. [#67264](https://github.com/ClickHouse/ClickHouse/pull/67264) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove test `02982_aggregation_states_destruction`. [#67266](https://github.com/ClickHouse/ClickHouse/pull/67266) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix OOM in test runs. [#67268](https://github.com/ClickHouse/ClickHouse/pull/67268) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove harmful stuff from tests. [#67275](https://github.com/ClickHouse/ClickHouse/pull/67275) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `03201_variant_null_map_subcolumn`. [#67276](https://github.com/ClickHouse/ClickHouse/pull/67276) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Split `01651_lc_insert_tiny_log`. 
[#67279](https://github.com/ClickHouse/ClickHouse/pull/67279) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Debug test `02490_benchmark_max_consecutive_errors`. [#67281](https://github.com/ClickHouse/ClickHouse/pull/67281) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `02833_concurrrent_sessions`. [#67282](https://github.com/ClickHouse/ClickHouse/pull/67282) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a separate test for exception handling. [#67283](https://github.com/ClickHouse/ClickHouse/pull/67283) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Debug test `01600_parts_states_metrics_long`. [#67284](https://github.com/ClickHouse/ClickHouse/pull/67284) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Faster test `02231_buffer_aggregate_states_leak`. [#67285](https://github.com/ClickHouse/ClickHouse/pull/67285) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix curiosities in `TimerDescriptor`. [#67287](https://github.com/ClickHouse/ClickHouse/pull/67287) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add retries to test `02911_backup_restore_keeper_map`. [#67290](https://github.com/ClickHouse/ClickHouse/pull/67290) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Functions: Iterate over input_rows_count where appropriate. [#67294](https://github.com/ClickHouse/ClickHouse/pull/67294) ([Robert Schulze](https://github.com/rschu1ze)). +* Add documentation for `compile_expressions`. [#67300](https://github.com/ClickHouse/ClickHouse/pull/67300) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Wrap log lines in the CI report for functional tests. [#67301](https://github.com/ClickHouse/ClickHouse/pull/67301) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix bad test `02050_client_profile_events`. [#67309](https://github.com/ClickHouse/ClickHouse/pull/67309) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* I do not want to think about this code. [#67312](https://github.com/ClickHouse/ClickHouse/pull/67312) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `00940_max_parts_in_total`. [#67313](https://github.com/ClickHouse/ClickHouse/pull/67313) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tests for Kafka cannot run in parallel. [#67315](https://github.com/ClickHouse/ClickHouse/pull/67315) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#66285](https://github.com/ClickHouse/ClickHouse/issues/66285). [#67325](https://github.com/ClickHouse/ClickHouse/pull/67325) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Follow-up for [#67301](https://github.com/ClickHouse/ClickHouse/issues/67301). [#67327](https://github.com/ClickHouse/ClickHouse/pull/67327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#61659](https://github.com/ClickHouse/ClickHouse/issues/61659). [#67332](https://github.com/ClickHouse/ClickHouse/pull/67332) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix integration test `test_backup_restore_on_cluster/test_disallow_concurrency`. [#67336](https://github.com/ClickHouse/ClickHouse/pull/67336) ([Vitaly Baranov](https://github.com/vitlibar)). +* Faster and less flaky 01246_buffer_flush (by using HTTP over clickhouse-client). [#67340](https://github.com/ClickHouse/ClickHouse/pull/67340) ([Azat Khuzhin](https://github.com/azat)). +* Fix: data race in TCPHandler on socket timeouts settings. 
[#67341](https://github.com/ClickHouse/ClickHouse/pull/67341) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* CI: Print stdout, stderr for docker pull command. [#67343](https://github.com/ClickHouse/ClickHouse/pull/67343) ([Max K.](https://github.com/maxknv)). +* Followup [#67290](https://github.com/ClickHouse/ClickHouse/issues/67290). [#67348](https://github.com/ClickHouse/ClickHouse/pull/67348) ([vdimir](https://github.com/vdimir)). +* Skip parallel for `test_storage_kerberized_kafka`. [#67349](https://github.com/ClickHouse/ClickHouse/pull/67349) ([Andrey Zvonov](https://github.com/zvonand)). +* Don't use PeekableReadBuffer in JSONAsObject format. [#67354](https://github.com/ClickHouse/ClickHouse/pull/67354) ([Kruglov Pavel](https://github.com/Avogar)). +* This closes: [#57316](https://github.com/ClickHouse/ClickHouse/issues/57316). [#67355](https://github.com/ClickHouse/ClickHouse/pull/67355) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Remove duplicated tests. [#67357](https://github.com/ClickHouse/ClickHouse/pull/67357) ([Kruglov Pavel](https://github.com/Avogar)). +* Release branch was not detected properly and job which must run on release branch could be reused from feature branches. PR Fixes detection of release branches. [#67358](https://github.com/ClickHouse/ClickHouse/pull/67358) ([Max K.](https://github.com/maxknv)). +* Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to sql. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)). +* Add no-distributed-cache tag in tests. [#67361](https://github.com/ClickHouse/ClickHouse/pull/67361) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Trying to fix test_cache_evicted_by_temporary_data and print debug info. [#67362](https://github.com/ClickHouse/ClickHouse/pull/67362) ([vdimir](https://github.com/vdimir)). +* Try to fix: ALL_CONNECTION_TRIES_FAILED with parallel replicas. [#67389](https://github.com/ClickHouse/ClickHouse/pull/67389) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix bad test `01036_no_superfluous_dict_reload_on_create_database`. [#67390](https://github.com/ClickHouse/ClickHouse/pull/67390) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Adding `SelectedPartsTotal` and `SelectedMarksTotal` as new ProfileEvents. [#67393](https://github.com/ClickHouse/ClickHouse/pull/67393) ([Jordi Villar](https://github.com/jrdi)). +* Print debug info in `test_storage_s3_queue/test.py::test_shards_distributed`. [#67394](https://github.com/ClickHouse/ClickHouse/pull/67394) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Mute degraded perf test. [#67396](https://github.com/ClickHouse/ClickHouse/pull/67396) ([Nikita Taranov](https://github.com/nickitat)). +* Debug TimerDescriptor. [#67397](https://github.com/ClickHouse/ClickHouse/pull/67397) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove has_single_port property from plan stream. [#67398](https://github.com/ClickHouse/ClickHouse/pull/67398) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix typo. [#67400](https://github.com/ClickHouse/ClickHouse/pull/67400) ([Halersson Paris](https://github.com/halersson)). +* CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)). +* Disable 02932_refreshable_materialized_views. 
[#67404](https://github.com/ClickHouse/ClickHouse/pull/67404) ([Michael Kolupaev](https://github.com/al13n321)). +* Follow-up to [#67294](https://github.com/ClickHouse/ClickHouse/issues/67294). [#67405](https://github.com/ClickHouse/ClickHouse/pull/67405) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix DWARF range list parsing in stack symbolizer. [#67417](https://github.com/ClickHouse/ClickHouse/pull/67417) ([Michael Kolupaev](https://github.com/al13n321)). +* Make Dwarf::findAddress() fallback slow path less slow. [#67418](https://github.com/ClickHouse/ClickHouse/pull/67418) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix inconsistent formatting of CODEC and STATISTICS. [#67421](https://github.com/ClickHouse/ClickHouse/pull/67421) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Reduced complexity of the test 02832_alter_max_sessions_for_user. [#67425](https://github.com/ClickHouse/ClickHouse/pull/67425) ([Alexey Gerasimchuck](https://github.com/Demilivor)). +* Remove obsolete `--multiquery` parameter from tests. [#67435](https://github.com/ClickHouse/ClickHouse/pull/67435) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix public backports. [#67439](https://github.com/ClickHouse/ClickHouse/pull/67439) ([Raúl Marín](https://github.com/Algunenano)). +* Bump Azure from v1.12 to v1.13. [#67446](https://github.com/ClickHouse/ClickHouse/pull/67446) ([Robert Schulze](https://github.com/rschu1ze)). +* 24.7 add missing documentation and testing. [#67454](https://github.com/ClickHouse/ClickHouse/pull/67454) ([Nikita Fomichev](https://github.com/fm4v)). +* Use correct order of fields in `StorageURLSource`. [#67455](https://github.com/ClickHouse/ClickHouse/pull/67455) ([Antonio Andelic](https://github.com/antonio2368)). +* run 01171 test in parallel. [#67470](https://github.com/ClickHouse/ClickHouse/pull/67470) ([Sema Checherinda](https://github.com/CheSema)). +* [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)). +* Reduce 02473_multistep_prewhere run time. [#67475](https://github.com/ClickHouse/ClickHouse/pull/67475) ([Alexander Gololobov](https://github.com/davenger)). +* Update version_date.tsv and changelogs after v24.7.1.2915-stable. [#67483](https://github.com/ClickHouse/ClickHouse/pull/67483) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Revert [#61750](https://github.com/ClickHouse/ClickHouse/issues/61750) "Improve JSONEachRow reading by ignoring the keys case". [#67484](https://github.com/ClickHouse/ClickHouse/pull/67484) ([Michael Kolupaev](https://github.com/al13n321)). +* Disable parallel run for `01923_network_receive_time_metric_insert.sh`. [#67492](https://github.com/ClickHouse/ClickHouse/pull/67492) ([Julia Kartseva](https://github.com/jkartseva)). +* Fix test `test_backup_restore_on_cluster/test.py::test_mutation`. [#67494](https://github.com/ClickHouse/ClickHouse/pull/67494) ([Vitaly Baranov](https://github.com/vitlibar)). +* [Green CI] Fix potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)). +* [Green CI] Test `test_storage_azure_blob_storage/test.py` is flaky. [#67512](https://github.com/ClickHouse/ClickHouse/pull/67512) ([Daniil Ivanik](https://github.com/divanik)). +* Prepare Release workflow for production. 
[#67523](https://github.com/ClickHouse/ClickHouse/pull/67523) ([Max K.](https://github.com/maxknv)). +* Fix upgrade check. [#67524](https://github.com/ClickHouse/ClickHouse/pull/67524) ([Raúl Marín](https://github.com/Algunenano)). +* [Green CI] test 03164_s3_settings_for_queries_and_merges is flaky. [#67535](https://github.com/ClickHouse/ClickHouse/pull/67535) ([Daniil Ivanik](https://github.com/divanik)). +* Log message and increased concurrency for table removal. [#67537](https://github.com/ClickHouse/ClickHouse/pull/67537) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix AsyncLoader destruction race. [#67553](https://github.com/ClickHouse/ClickHouse/pull/67553) ([Sergei Trifonov](https://github.com/serxa)). +* Add an assert into TimerDescriptor. [#67555](https://github.com/ClickHouse/ClickHouse/pull/67555) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Re-enable ICU on s390/x. [#67557](https://github.com/ClickHouse/ClickHouse/pull/67557) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelogs after v24.4.4.107-stable. [#67559](https://github.com/ClickHouse/ClickHouse/pull/67559) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Sometimes we fail with timeout in stateless tests and the reason for that seems to be in `stop_logs_replication` step. Add a check for timeout here. [#67560](https://github.com/ClickHouse/ClickHouse/pull/67560) ([Nikolay Degterinsky](https://github.com/evillique)). +* Miscellaneous. [#67564](https://github.com/ClickHouse/ClickHouse/pull/67564) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* enable parallel_view_processing in perf tests. [#67565](https://github.com/ClickHouse/ClickHouse/pull/67565) ([Sema Checherinda](https://github.com/CheSema)). +* Fix flaky `test_system_kafka_consumers_rebalance`. [#67566](https://github.com/ClickHouse/ClickHouse/pull/67566) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Update version_date.tsv and changelogs after v24.7.2.13-stable. [#67586](https://github.com/ClickHouse/ClickHouse/pull/67586) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Fix 01811_storage_buffer_flush_parameters flakiness. [#67589](https://github.com/ClickHouse/ClickHouse/pull/67589) ([Azat Khuzhin](https://github.com/azat)). +* Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)). +* Update minio in integration tests. [#67595](https://github.com/ClickHouse/ClickHouse/pull/67595) ([Antonio Andelic](https://github.com/antonio2368)). +* added tests for page index in parquet files. [#67596](https://github.com/ClickHouse/ClickHouse/pull/67596) ([max-vostrikov](https://github.com/max-vostrikov)). +* Update check_rabbitmq_is_available. [#67597](https://github.com/ClickHouse/ClickHouse/pull/67597) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)). +* Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)). +* Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)). +* Update CHANGELOG.md. 
[#67607](https://github.com/ClickHouse/ClickHouse/pull/67607) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove some `no-parallel` tags from tests. [#67610](https://github.com/ClickHouse/ClickHouse/pull/67610) ([Raúl Marín](https://github.com/Algunenano)). +* Update README.md. [#67613](https://github.com/ClickHouse/ClickHouse/pull/67613) ([Tyler Hannan](https://github.com/tylerhannan)). +* Try fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)). +* Don't run ASAN unit tests under gdb. [#67622](https://github.com/ClickHouse/ClickHouse/pull/67622) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash in KeyCondition::cloneASTWithInversionPushDown() caused by type change. [#67641](https://github.com/ClickHouse/ClickHouse/pull/67641) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix race condition between ProcessList and Settings. [#67645](https://github.com/ClickHouse/ClickHouse/pull/67645) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* `02481_async_insert_race_long.sh` flakiness fixes. [#67650](https://github.com/ClickHouse/ClickHouse/pull/67650) ([Julia Kartseva](https://github.com/jkartseva)). +* Fixes [#67651](https://github.com/ClickHouse/ClickHouse/issues/67651). [#67653](https://github.com/ClickHouse/ClickHouse/pull/67653) ([pufit](https://github.com/pufit)). +* Fix flaky `test_replicated_table_attach`. [#67658](https://github.com/ClickHouse/ClickHouse/pull/67658) ([Antonio Andelic](https://github.com/antonio2368)). +* Update version_date.tsv and changelogs after v24.4.4.113-stable. [#67659](https://github.com/ClickHouse/ClickHouse/pull/67659) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Even better healthcheck for ldap. [#67667](https://github.com/ClickHouse/ClickHouse/pull/67667) ([Andrey Zvonov](https://github.com/zvonand)). +* Fix 03203_client_benchmark_options. [#67671](https://github.com/ClickHouse/ClickHouse/pull/67671) ([vdimir](https://github.com/vdimir)). +* Integration tests: fix ports clashing problem. [#67672](https://github.com/ClickHouse/ClickHouse/pull/67672) ([Nikita Fomichev](https://github.com/fm4v)). +* Remove some `no-parallel` tags from tests (Part 2). [#67673](https://github.com/ClickHouse/ClickHouse/pull/67673) ([Raúl Marín](https://github.com/Algunenano)). +* Use FunctionArgumentDescriptors for bitSlice. [#67674](https://github.com/ClickHouse/ClickHouse/pull/67674) ([Lennard Eijsackers](https://github.com/Blokje5)). +* Update version_date.tsv and changelog after v24.3.6.48-lts. [#67677](https://github.com/ClickHouse/ClickHouse/pull/67677) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Avoid ddl queue timeout in 02313_filesystem_cache_seeks. [#67680](https://github.com/ClickHouse/ClickHouse/pull/67680) ([Nikita Taranov](https://github.com/nickitat)). +* Fix bad log message in sort description. [#67690](https://github.com/ClickHouse/ClickHouse/pull/67690) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update version_date.tsv and changelog after v23.8.16.40-lts. [#67692](https://github.com/ClickHouse/ClickHouse/pull/67692) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Fix check names in test reports and the CI Logs database. [#67696](https://github.com/ClickHouse/ClickHouse/pull/67696) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Minor refactoring in ci_utils. 
[#67706](https://github.com/ClickHouse/ClickHouse/pull/67706) ([Max K.](https://github.com/maxknv)). +* Fix 01042_system_reload_dictionary_reloads_completely flakiness. [#67719](https://github.com/ClickHouse/ClickHouse/pull/67719) ([Azat Khuzhin](https://github.com/azat)). +* Fix test `00002_log_and_exception_messages_formatting`. [#67723](https://github.com/ClickHouse/ClickHouse/pull/67723) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `02789_reading_from_s3_with_connection_pool`. [#67726](https://github.com/ClickHouse/ClickHouse/pull/67726) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix strange code in HostResolvePool. [#67727](https://github.com/ClickHouse/ClickHouse/pull/67727) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix a typo. [#67729](https://github.com/ClickHouse/ClickHouse/pull/67729) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Smart handling of processes leftovers in tests. [#67737](https://github.com/ClickHouse/ClickHouse/pull/67737) ([Azat Khuzhin](https://github.com/azat)). +* Fix test retries. [#67738](https://github.com/ClickHouse/ClickHouse/pull/67738) ([Azat Khuzhin](https://github.com/azat)). +* Fill only selected columns from system.clusters. [#67739](https://github.com/ClickHouse/ClickHouse/pull/67739) ([Azat Khuzhin](https://github.com/azat)). +* Bump NuRaft (to properly catch thread exceptions). [#67740](https://github.com/ClickHouse/ClickHouse/pull/67740) ([Azat Khuzhin](https://github.com/azat)). +* Try to fix RabbitMQ test failures. [#67743](https://github.com/ClickHouse/ClickHouse/pull/67743) ([Azat Khuzhin](https://github.com/azat)). +* Stateless tests: attempt to fix timeouts of `02473_multistep_prewhere* 00411_long_accurate_number_comparison*`. [#67746](https://github.com/ClickHouse/ClickHouse/pull/67746) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix test_ttl_move::test_alter_with_merge_work flakiness. [#67747](https://github.com/ClickHouse/ClickHouse/pull/67747) ([Azat Khuzhin](https://github.com/azat)). +* ci: better stateless runner (correctly collect artifacts and also some basic errors capturing). [#67752](https://github.com/ClickHouse/ClickHouse/pull/67752) ([Azat Khuzhin](https://github.com/azat)). +* Introduce `no-flaky-check` tag. [#67755](https://github.com/ClickHouse/ClickHouse/pull/67755) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Whitespaces. [#67771](https://github.com/ClickHouse/ClickHouse/pull/67771) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* [RFC] Print original query for AST formatting check on CI. [#67776](https://github.com/ClickHouse/ClickHouse/pull/67776) ([Azat Khuzhin](https://github.com/azat)). +* Fix test `02833_concurrent_sessions`, Fix test `02835_drop_user_during_session`. [#67779](https://github.com/ClickHouse/ClickHouse/pull/67779) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix diagnostics in the test script. [#67780](https://github.com/ClickHouse/ClickHouse/pull/67780) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `02231_bloom_filter_sizing`. [#67784](https://github.com/ClickHouse/ClickHouse/pull/67784) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fixed session_log related tests race condition on logout. [#67785](https://github.com/ClickHouse/ClickHouse/pull/67785) ([Alexey Gerasimchuck](https://github.com/Demilivor)). +* ci/stateless: fix artifacts post-processing and detect if something failed there. 
[#67791](https://github.com/ClickHouse/ClickHouse/pull/67791) ([Azat Khuzhin](https://github.com/azat)). +* Integration tests: fix flaky `test_dictionaries_update_and_reload::test_reload_after_fail_by_timer`. [#67793](https://github.com/ClickHouse/ClickHouse/pull/67793) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix possible CANNOT_READ_ALL_DATA during server startup in performance tests. [#67795](https://github.com/ClickHouse/ClickHouse/pull/67795) ([Azat Khuzhin](https://github.com/azat)). +* Reduce table size in 03037_dynamic_merges_2* tests. [#67797](https://github.com/ClickHouse/ClickHouse/pull/67797) ([Kruglov Pavel](https://github.com/Avogar)). +* Disable 03038_nested_dynamic_merges* under sanitizers because it's too slow. [#67798](https://github.com/ClickHouse/ClickHouse/pull/67798) ([Kruglov Pavel](https://github.com/Avogar)). +* Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Update comment. [#67801](https://github.com/ClickHouse/ClickHouse/pull/67801) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix bad test `03032_redundant_equals`. [#67822](https://github.com/ClickHouse/ClickHouse/pull/67822) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update tags for a few tests. [#67829](https://github.com/ClickHouse/ClickHouse/pull/67829) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add debug logging for window view tests. [#67841](https://github.com/ClickHouse/ClickHouse/pull/67841) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Closes [#67621](https://github.com/ClickHouse/ClickHouse/issues/67621). [#67843](https://github.com/ClickHouse/ClickHouse/pull/67843) ([Ilya Yatsishin](https://github.com/qoega)). +* Fix query cache randomization in stress tests. [#67855](https://github.com/ClickHouse/ClickHouse/pull/67855) ([Azat Khuzhin](https://github.com/azat)). +* Update version_date.tsv and changelogs after v24.5.5.78-stable. [#67863](https://github.com/ClickHouse/ClickHouse/pull/67863) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Un-flake 02524_fuzz_and_fuss_2. [#67867](https://github.com/ClickHouse/ClickHouse/pull/67867) ([Robert Schulze](https://github.com/rschu1ze)). +* Misc fixes. [#67869](https://github.com/ClickHouse/ClickHouse/pull/67869) ([Alexey Katsman](https://github.com/alexkats)). +* Fixes [#67444](https://github.com/ClickHouse/ClickHouse/issues/67444). [#67873](https://github.com/ClickHouse/ClickHouse/pull/67873) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* no-msan 00314_sample_factor_virtual_column. [#67874](https://github.com/ClickHouse/ClickHouse/pull/67874) ([Michael Kolupaev](https://github.com/al13n321)). +* Revert "Revert "Add a test for [#47892](https://github.com/ClickHouse/ClickHouse/issues/47892)"". [#67877](https://github.com/ClickHouse/ClickHouse/pull/67877) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Lazily create invalid files in S3. [#67882](https://github.com/ClickHouse/ClickHouse/pull/67882) ([Antonio Andelic](https://github.com/antonio2368)). +* Do not try to create azure container if not needed. [#67896](https://github.com/ClickHouse/ClickHouse/pull/67896) ([Anton Popov](https://github.com/CurtizJ)). +* CI: Fix for setting Mergeable Check from sync. 
[#67898](https://github.com/ClickHouse/ClickHouse/pull/67898) ([Max K.](https://github.com/maxknv)). +* Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing. [#67904](https://github.com/ClickHouse/ClickHouse/pull/67904) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelogs after v24.6.3.95-stable. [#67910](https://github.com/ClickHouse/ClickHouse/pull/67910) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Remove some no-parallel tags from tests (Part 3). [#67914](https://github.com/ClickHouse/ClickHouse/pull/67914) ([Raúl Marín](https://github.com/Algunenano)). +* Follow up [#67235](https://github.com/ClickHouse/ClickHouse/issues/67235). [#67917](https://github.com/ClickHouse/ClickHouse/pull/67917) ([Nikita Taranov](https://github.com/nickitat)). +* CI: Changelog: Critical Bug Fix to Bug Fix. [#67919](https://github.com/ClickHouse/ClickHouse/pull/67919) ([Max K.](https://github.com/maxknv)). +* CI: Multi-channel CiBuddy. [#67923](https://github.com/ClickHouse/ClickHouse/pull/67923) ([Max K.](https://github.com/maxknv)). +* more logs to debug logical error from async inserts. [#67928](https://github.com/ClickHouse/ClickHouse/pull/67928) ([Han Fei](https://github.com/hanfei1991)). +* Fix stress test error with TDigest statistics. [#67930](https://github.com/ClickHouse/ClickHouse/pull/67930) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove some no-parallel tags from tests (Part 4). [#67932](https://github.com/ClickHouse/ClickHouse/pull/67932) ([Raúl Marín](https://github.com/Algunenano)). +* Upgrade QPL to v1.6.0. [#67933](https://github.com/ClickHouse/ClickHouse/pull/67933) ([Maria Zhukova](https://github.com/mzhukova)). +* CI: Strict job timeout 1.5h for tests, 2h for builds. [#67934](https://github.com/ClickHouse/ClickHouse/pull/67934) ([Max K.](https://github.com/maxknv)). +* Remove slow tests from fasttest check. [#67941](https://github.com/ClickHouse/ClickHouse/pull/67941) ([Raúl Marín](https://github.com/Algunenano)). +* Fix memory corruption in usearch. [#67942](https://github.com/ClickHouse/ClickHouse/pull/67942) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#68547](https://github.com/ClickHouse/ClickHouse/issues/68547): Timeout handling for functional and integration tests, store artifacts and report if timed out - sets 2h default timeout for all jobs. [#67944](https://github.com/ClickHouse/ClickHouse/pull/67944) ([Max K.](https://github.com/maxknv)). +* Unflake 02099_tsv_raw_format.sh. [#67947](https://github.com/ClickHouse/ClickHouse/pull/67947) ([Robert Schulze](https://github.com/rschu1ze)). +* This closes: [#67866](https://github.com/ClickHouse/ClickHouse/issues/67866). [#67950](https://github.com/ClickHouse/ClickHouse/pull/67950) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Change log level of an insignificant message in clickhouse-local. [#67952](https://github.com/ClickHouse/ClickHouse/pull/67952) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)). +* Fix flaky `test_storage_s3_queue/test.py::test_multiple_tables_streaming_sync_distributed`. [#67959](https://github.com/ClickHouse/ClickHouse/pull/67959) ([Julia Kartseva](https://github.com/jkartseva)). +* tests: fix 03002_part_log_rmt_fetch_merge_error flakiness. 
[#67960](https://github.com/ClickHouse/ClickHouse/pull/67960) ([Azat Khuzhin](https://github.com/azat)). +* Fix timeout of 02310_clickhouse_local_INSERT_progress_profile_events. [#67961](https://github.com/ClickHouse/ClickHouse/pull/67961) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove obsolete `--multiquery` parameter (follow-up to [#63898](https://github.com/ClickHouse/ClickHouse/issues/63898)), pt. III. [#67964](https://github.com/ClickHouse/ClickHouse/pull/67964) ([Robert Schulze](https://github.com/rschu1ze)). +* Update minio in stateless tests. [#67975](https://github.com/ClickHouse/ClickHouse/pull/67975) ([Antonio Andelic](https://github.com/antonio2368)). +* CI: Integration tests uncover some logging. [#67978](https://github.com/ClickHouse/ClickHouse/pull/67978) ([Max K.](https://github.com/maxknv)). +* Fix 03130_convert_outer_join_to_inner_join. [#67980](https://github.com/ClickHouse/ClickHouse/pull/67980) ([vdimir](https://github.com/vdimir)). +* Collect minio audit logs in stateless tests. [#67998](https://github.com/ClickHouse/ClickHouse/pull/67998) ([Antonio Andelic](https://github.com/antonio2368)). +* Remove some no-parallel tags from tests (Part 5). [#68002](https://github.com/ClickHouse/ClickHouse/pull/68002) ([Raúl Marín](https://github.com/Algunenano)). +* Minor fixes in tables.md. [#68004](https://github.com/ClickHouse/ClickHouse/pull/68004) ([Ilya Yatsishin](https://github.com/qoega)). +* Follow up for [#67843](https://github.com/ClickHouse/ClickHouse/issues/67843). [#68007](https://github.com/ClickHouse/ClickHouse/pull/68007) ([Ilya Yatsishin](https://github.com/qoega)). +* Remove unused CLI option. [#68008](https://github.com/ClickHouse/ClickHouse/pull/68008) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `02845_threads_count_in_distributed_queries`. [#68011](https://github.com/ClickHouse/ClickHouse/pull/68011) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Pass job timeout into tests config. [#68013](https://github.com/ClickHouse/ClickHouse/pull/68013) ([Nikita Fomichev](https://github.com/fm4v)). +* Add a test for [#57420](https://github.com/ClickHouse/ClickHouse/issues/57420). [#68017](https://github.com/ClickHouse/ClickHouse/pull/68017) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Revert "Revert "Bump rocksdb from v8.10 to v9.4 + enable jemalloc and liburing"". [#68021](https://github.com/ClickHouse/ClickHouse/pull/68021) ([Robert Schulze](https://github.com/rschu1ze)). +* CI: Fix for filtering jobs in PRs. [#68022](https://github.com/ClickHouse/ClickHouse/pull/68022) ([Max K.](https://github.com/maxknv)). +* Docs: Update 3rd party library guide. [#68027](https://github.com/ClickHouse/ClickHouse/pull/68027) ([Robert Schulze](https://github.com/rschu1ze)). +* Refactor tests for (experimental) statistics. [#68034](https://github.com/ClickHouse/ClickHouse/pull/68034) ([Robert Schulze](https://github.com/rschu1ze)). +* Split `00284_external_aggregation.sql`. [#68037](https://github.com/ClickHouse/ClickHouse/pull/68037) ([Robert Schulze](https://github.com/rschu1ze)). +* Update version_date.tsv and changelog after v24.7.3.42-stable. [#68045](https://github.com/ClickHouse/ClickHouse/pull/68045) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update test_drop_is_lock_free/test.py. [#68051](https://github.com/ClickHouse/ClickHouse/pull/68051) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fixes [#67865](https://github.com/ClickHouse/ClickHouse/issues/67865). 
[#68054](https://github.com/ClickHouse/ClickHouse/pull/68054) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Disable randomization of `trace_profile_events` in clickhouse-test. [#68058](https://github.com/ClickHouse/ClickHouse/pull/68058) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Minor CMake cleanup. [#68069](https://github.com/ClickHouse/ClickHouse/pull/68069) ([Robert Schulze](https://github.com/rschu1ze)). +* If the test cluster is overloaded, sometimes simple query execution [can take more time](https://pastila.nl/?00224e71/f017cd6675b52ccc205c81aa62a47de5#8dB4+C4MOdOi3NLV1dc0Fg==) than `Buffer`'s max time to flush. This PR doubles the timeout and allows to skip the check in case of significant latency. [#68072](https://github.com/ClickHouse/ClickHouse/pull/68072) ([pufit](https://github.com/pufit)). +* Fix flaky `02675_profile_events_from_query_log_and_client`. [#68097](https://github.com/ClickHouse/ClickHouse/pull/68097) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix race in `WithRetries`. [#68106](https://github.com/ClickHouse/ClickHouse/pull/68106) ([Antonio Andelic](https://github.com/antonio2368)). +* Add empty cell to reports when time is missing. [#68112](https://github.com/ClickHouse/ClickHouse/pull/68112) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix test `00900_long_parquet_load`. [#68130](https://github.com/ClickHouse/ClickHouse/pull/68130) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* tests: fix 01246_buffer_flush flakiness due to slow trace_log flush. [#68134](https://github.com/ClickHouse/ClickHouse/pull/68134) ([Azat Khuzhin](https://github.com/azat)). +* Only use Field::safeGet - Field::get prone to type punning. [#68135](https://github.com/ClickHouse/ClickHouse/pull/68135) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* tests: attempt to fix 01600_parts_states_metrics_long (by forbid parallel run). [#68136](https://github.com/ClickHouse/ClickHouse/pull/68136) ([Azat Khuzhin](https://github.com/azat)). +* Fix01710 Timeout. [#68138](https://github.com/ClickHouse/ClickHouse/pull/68138) ([jsc0218](https://github.com/jsc0218)). +* Remove the extra cell from reports when it is not necessary. [#68145](https://github.com/ClickHouse/ClickHouse/pull/68145) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Remove "Processing configuration file" message from clickhouse-local. [#68157](https://github.com/ClickHouse/ClickHouse/pull/68157) ([Azat Khuzhin](https://github.com/azat)). +* tests: fix 02122_join_group_by_timeout flakiness. [#68160](https://github.com/ClickHouse/ClickHouse/pull/68160) ([Azat Khuzhin](https://github.com/azat)). +* Fix `test_cluster_all_replicas`. [#68178](https://github.com/ClickHouse/ClickHouse/pull/68178) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix leftovers. [#68181](https://github.com/ClickHouse/ClickHouse/pull/68181) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix test `01172_transaction_counters`. [#68182](https://github.com/ClickHouse/ClickHouse/pull/68182) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactor tests for (experimental) statistics. [#68186](https://github.com/ClickHouse/ClickHouse/pull/68186) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove Log engine from Kafka integration tests. [#68200](https://github.com/ClickHouse/ClickHouse/pull/68200) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). 
+* [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)). +* Replace segfault in Replicated database with logical error. [#68250](https://github.com/ClickHouse/ClickHouse/pull/68250) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backported in [#68423](https://github.com/ClickHouse/ClickHouse/issues/68423): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#68374](https://github.com/ClickHouse/ClickHouse/issues/68374): Rename: S3DiskNoKeyErrors -> DiskS3NoSuchKeyErrors. [#68361](https://github.com/ClickHouse/ClickHouse/pull/68361) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Backported in [#68637](https://github.com/ClickHouse/ClickHouse/issues/68637): Check for invalid regexp in JSON SKIP REGEXP section. [#68451](https://github.com/ClickHouse/ClickHouse/pull/68451) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#68485](https://github.com/ClickHouse/ClickHouse/issues/68485): Better inference of date times 2. [#68452](https://github.com/ClickHouse/ClickHouse/pull/68452) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#68540](https://github.com/ClickHouse/ClickHouse/issues/68540): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)). +* Backported in [#68518](https://github.com/ClickHouse/ClickHouse/issues/68518): Minor update in Dynamic/JSON serializations. [#68459](https://github.com/ClickHouse/ClickHouse/pull/68459) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#68558](https://github.com/ClickHouse/ClickHouse/issues/68558): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)). +* Backported in [#68576](https://github.com/ClickHouse/ClickHouse/issues/68576): CI: Tidy build timeout from 2h to 3h. [#68567](https://github.com/ClickHouse/ClickHouse/pull/68567) ([Max K.](https://github.com/maxknv)). + diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 6cb36e2049b..4cc7563135a 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -91,6 +91,28 @@ SELECT 1 In addition to the above settings, you can use `USE_*` flags from `system.build_options` to define usage of particular ClickHouse features. For example, if your test uses a MySQL table, you should add a tag `use-mysql`. +### Specifying limits for random settings + +A test can specify minimum and maximum allowed values for settings that can be randomized during a test run. + +For `.sh` tests limits are written as a comment on the line next to tags or on the second line if no tags are specified: + +```bash +#!/usr/bin/env bash +# Tags: no-fasttest +# Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None) +``` + +For `.sql` tests limits are written as a SQL comment on the line next to tags or on the first line if no tags are specified: + +```sql +-- Tags: no-fasttest +-- Random settings limits: max_block_size=(1000, 10000); index_granularity=(100, None) +SELECT 1 +``` + +If you need to specify only one limit, you can use `None` for the other one. + ### Choosing the Test Name The name of the test starts with a five-digit prefix followed by a descriptive name, such as `00422_hash_function_constexpr.sql`.
To choose the prefix, find the largest prefix already present in the directory, and increment it by one. In the meantime, some other tests might be added with the same numeric prefix, but this is OK and does not lead to any problems, you don't have to change it later. diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md index c9df713231a..404cec97def 100644 --- a/docs/en/engines/table-engines/integrations/hdfs.md +++ b/docs/en/engines/table-engines/integrations/hdfs.md @@ -240,7 +240,7 @@ libhdfs3 support HDFS namenode HA. ## Storage Settings {#storage-settings} - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. -- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. +- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default. - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default. **See Also** diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md index d664c37bd0f..48a08dfa499 100644 --- a/docs/en/engines/table-engines/integrations/s3.md +++ b/docs/en/engines/table-engines/integrations/s3.md @@ -225,7 +225,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32) ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. -- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. +- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default. - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default. ## S3-related Settings {#settings} diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index e73d6f07a32..1057ccb5fee 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -22,10 +22,10 @@ ORDER BY Distance(vectors, Point) LIMIT N ``` -`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md), for example embeddings. -Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function but [other -distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17, -0.33, ...)`, and `N` limits the number of search results. +`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md) or Array(Float64), for example +embeddings. Function `Distance` computes the distance between two vectors. 
Often, the Euclidean (L2) distance is chosen as distance function +but [other distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, +e.g. `(0.17, 0.33, ...)`, and `N` limits the number of search results. This query returns the top-`N` closest points to the reference point. Parameter `N` limits the number of returned values which is useful for situations where `MaxDistance` is difficult to determine in advance. @@ -59,6 +59,8 @@ Parameters: - `ef_construction`: (optional, default: 128) - `ef_search`: (optional, default: 64) +Value 0 for parameters `m`, `ef_construction`, and `ef_search` refers to the default value. + Example: ```sql diff --git a/docs/en/interfaces/schema-inference.md b/docs/en/interfaces/schema-inference.md index 05fae994cbe..4afba20d76c 100644 --- a/docs/en/interfaces/schema-inference.md +++ b/docs/en/interfaces/schema-inference.md @@ -359,13 +359,14 @@ DESC format(JSONEachRow, '{"int" : 42, "float" : 42.42, "string" : "Hello, World Dates, DateTimes: ```sql -DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00"}') +DESC format(JSONEachRow, '{"date" : "2022-01-01", "datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"}') ``` ```response -┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ date │ Nullable(Date) │ │ │ │ │ │ -│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ -└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ date │ Nullable(Date) │ │ │ │ │ │ +│ datetime │ Nullable(DateTime) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` Arrays: @@ -759,12 +760,13 @@ DESC format(CSV, 'Hello world!,World hello!') Dates, DateTimes: ```sql -DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00"') +DESC format(CSV, '"2020-01-01","2020-01-01 00:00:00","2022-01-01 00:00:00.000"') ``` ```response ┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -956,12 +958,13 @@ DESC format(TSKV, 'int=42 float=42.42 bool=true string=Hello,World!\n') Dates, DateTimes: ```sql -DESC format(TSV, '2020-01-01 2020-01-01 00:00:00') +DESC format(TSV, '2020-01-01 2020-01-01 00:00:00 2022-01-01 00:00:00.000') ``` ```response ┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -1126,12 +1129,13 @@ DESC format(Values, $$(42, 42.42, true, 'Hello,World!')$$) Dates, DateTimes: ```sql -DESC format(Values, $$('2020-01-01', '2020-01-01 00:00:00')$$) -``` + DESC format(Values, $$('2020-01-01', 
'2020-01-01 00:00:00', '2022-01-01 00:00:00.000')$$) + ``` ```response ┌─name─┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ │ c1 │ Nullable(Date) │ │ │ │ │ │ -│ c2 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ c2 │ Nullable(DateTime) │ │ │ │ │ │ +│ c3 │ Nullable(DateTime64(9)) │ │ │ │ │ │ └──────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` @@ -1504,8 +1508,8 @@ DESC format(JSONEachRow, $$ #### input_format_try_infer_datetimes -If enabled, ClickHouse will try to infer type `DateTime64` from string fields in schema inference for text formats. -If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime64(9)`, +If enabled, ClickHouse will try to infer type `DateTime` or `DateTime64` from string fields in schema inference for text formats. +If all fields from a column in sample data were successfully parsed as datetimes, the result type will be `DateTime` or `DateTime64(9)` (if any datetime had fractional part), if at least one field was not parsed as datetime, the result type will be `String`. Enabled by default. @@ -1513,39 +1517,66 @@ Enabled by default. **Examples** ```sql -SET input_format_try_infer_datetimes = 0 +SET input_format_try_infer_datetimes = 0; DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "2022-01-01 00:00:00.000"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} $$) ``` ```response -┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ datetime │ Nullable(String) │ │ │ │ │ │ -└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ datetime │ Nullable(String) │ │ │ │ │ │ +│ datetime64 │ Nullable(String) │ │ │ │ │ │ +└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` ```sql -SET input_format_try_infer_datetimes = 1 +SET input_format_try_infer_datetimes = 1; DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "2022-01-01 00:00:00.000"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} $$) ``` ```response -┌─name─────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ -└──────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ datetime │ Nullable(DateTime) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` ```sql DESC format(JSONEachRow, $$ - {"datetime" : "2021-01-01 00:00:00.000"} - {"datetime" : "unknown"} + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "unknown", "datetime64" : "unknown"} $$) ``` ```response 
-┌─name─────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ -│ datetime │ Nullable(String) │ │ │ │ │ │ -└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +┌─name───────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ datetime │ Nullable(String) │ │ │ │ │ │ +│ datetime64 │ Nullable(String) │ │ │ │ │ │ +└────────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ +``` + +#### input_format_try_infer_datetimes_only_datetime64 + +If enabled, ClickHouse will always infer `DateTime64(9)` when `input_format_try_infer_datetimes` is enabled even if datetime values don't contain fractional part. + +Disabled by default. + +**Examples** + +```sql +SET input_format_try_infer_datetimes = 1; +SET input_format_try_infer_datetimes_only_datetime64 = 1; +DESC format(JSONEachRow, $$ + {"datetime" : "2021-01-01 00:00:00", "datetime64" : "2021-01-01 00:00:00.000"} + {"datetime" : "2022-01-01 00:00:00", "datetime64" : "2022-01-01 00:00:00.000"} + $$) +``` + +```text +┌─name───────┬─type────────────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐ +│ datetime │ Nullable(DateTime64(9)) │ │ │ │ │ │ +│ datetime64 │ Nullable(DateTime64(9)) │ │ │ │ │ │ +└────────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘ ``` Note: Parsing datetimes during schema inference respect setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format) diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 5b7615485ca..8d9dce983bc 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -10,7 +10,7 @@ sidebar_label: Visual Interfaces ### ch-ui {#ch-ui} -[ch-ui](https://github.com/caioricciuti/ch-ui) is a simple React.js app interface for ClickHouse databases, designed for executing queries and visualizing data. Built with React and the ClickHouse client for web, it offers a sleek and user-friendly UI for easy database interactions. +[ch-ui](https://github.com/caioricciuti/ch-ui) is a simple React.js app interface for ClickHouse databases designed for executing queries and visualizing data. Built with React and the ClickHouse client for web, it offers a sleek and user-friendly UI for easy database interactions. Features: @@ -25,7 +25,7 @@ Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) pr Features: -- Works with ClickHouse directly from the browser, without the need to install additional software. +- Works with ClickHouse directly from the browser without the need to install additional software. - Query editor with syntax highlighting. - Auto-completion of commands. - Tools for graphical analysis of query execution. @@ -63,7 +63,7 @@ Features: - Table list with filtering and metadata. - Table preview with filtering and sorting. -- Read-only queries execution. +- Read-only query execution. ### Redash {#redash} @@ -75,23 +75,23 @@ Features: - Powerful editor of queries. - Database explorer. -- Visualization tools, that allow you to represent data in different forms. +- Visualization tool that allows you to represent data in different forms. 
### Grafana {#grafana} [Grafana](https://grafana.com/grafana/plugins/grafana-clickhouse-datasource/) is a platform for monitoring and visualization. -"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. Trusted and loved by the community" — grafana.com. +"Grafana allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data-driven culture. Trusted and loved by the community" — grafana.com. -ClickHouse datasource plugin provides a support for ClickHouse as a backend database. +ClickHouse data source plugin provides support for ClickHouse as a backend database. -### qryn (#qryn) +### qryn {#qryn} [qryn](https://metrico.in) is a polyglot, high-performance observability stack for ClickHouse _(formerly cLoki)_ with native Grafana integrations allowing users to ingest and analyze logs, metrics and telemetry traces from any agent supporting Loki/LogQL, Prometheus/PromQL, OTLP/Tempo, Elastic, InfluxDB and many more. Features: -- Built in Explore UI and LogQL CLI for querying, extracting and visualizing data +- Built-in Explore UI and LogQL CLI for querying, extracting and visualizing data - Native Grafana APIs support for querying, processing, ingesting, tracing and alerting without plugins - Powerful pipeline to dynamically search, filter and extract data from logs, events, traces and beyond - Ingestion and PUSH APIs transparently compatible with LogQL, PromQL, InfluxDB, Elastic and many more @@ -139,7 +139,7 @@ Features: ### DBM {#dbm} -[DBM](https://dbm.incubator.edurt.io/) DBM is a visual management tool for ClickHouse! +[DBM](https://github.com/devlive-community/dbm) DBM is a visual management tool for ClickHouse! Features: @@ -151,7 +151,7 @@ Features: - Support custom query - Support multiple data sources management(connection test, monitoring) - Support monitor (processor, connection, query) -- Support migrate data +- Support migrating data ### Bytebase {#bytebase} @@ -169,7 +169,7 @@ Features: ### Zeppelin-Interpreter-for-ClickHouse {#zeppelin-interpreter-for-clickhouse} -[Zeppelin-Interpreter-for-ClickHouse](https://github.com/SiderZhang/Zeppelin-Interpreter-for-ClickHouse) is a [Zeppelin](https://zeppelin.apache.org) interpreter for ClickHouse. Compared with JDBC interpreter, it can provide better timeout control for long running queries. +[Zeppelin-Interpreter-for-ClickHouse](https://github.com/SiderZhang/Zeppelin-Interpreter-for-ClickHouse) is a [Zeppelin](https://zeppelin.apache.org) interpreter for ClickHouse. Compared with the JDBC interpreter, it can provide better timeout control for long-running queries. ### ClickCat {#clickcat} @@ -179,7 +179,7 @@ Features: - An online SQL editor which can run your SQL code without any installing. - You can observe all processes and mutations. For those unfinished processes, you can kill them in ui. -- The Metrics contains Cluster Analysis,Data Analysis,Query Analysis. +- The Metrics contain Cluster Analysis, Data Analysis, and Query Analysis. ### ClickVisual {#clickvisual} @@ -332,7 +332,7 @@ Learn more about the product at [TABLUM.IO](https://tablum.io/) ### CKMAN {#ckman} -[CKMAN] (https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters! +[CKMAN](https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters! 
Features: diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index 59ee05d1f9e..1c82aeaaf2c 100644 --- a/docs/en/operations/named-collections.md +++ b/docs/en/operations/named-collections.md @@ -73,13 +73,21 @@ In the above example the `password_sha256_hex` value is the hexadecimal represen ### Storage for named collections -Named collections can either be stored on local disk or in zookeeper/keeper. By default local storage is used. +Named collections can either be stored on local disk or in ZooKeeper/Keeper. By default local storage is used. +They can also be stored using encryption with the same algorithms used for [disk encryption](storing-data#encrypted-virtual-file-system), +where `aes_128_ctr` is used by default. -To configure named collections storage in keeper and a `type` (equal to either `keeper` or `zookeeper`) and `path` (path in keeper, where named collections will be stored) to `named_collections_storage` section in configuration file: +To configure named collections storage you need to specify a `type`. This can be either `local` or `keeper`/`zookeeper`. For encrypted storage, +you can use `local_encrypted` or `keeper_encrypted`/`zookeeper_encrypted`. + +To use ZooKeeper/Keeper you also need to set a `path` (the path in ZooKeeper/Keeper where named collections will be stored) in the +`named_collections_storage` section of the configuration file. The following example uses encryption and ZooKeeper/Keeper: ``` - zookeeper + zookeeper_encrypted + bebec0cabebec0cabebec0cabebec0ca + aes_128_ctr /named_collections_path/ 1000 @@ -307,8 +315,22 @@ SELECT dictGet('dict', 'B', 2); ## Named collections for accessing PostgreSQL database -The description of parameters see [postgresql](../sql-reference/table-functions/postgresql.md). +For the description of parameters, see [postgresql](../sql-reference/table-functions/postgresql.md). Additionally, there are aliases: +- `username` for `user` +- `db` for `database`. + +Parameter `addresses_expr` is used in a collection instead of `host:port`. The parameter is optional, because there are other optional parameters: `host`, `hostname`, `port`. The following pseudo code explains the priority: + +```sql +CASE + WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr'] + WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432') + WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432') +END +``` + +Example of creation: ```sql CREATE NAMED COLLECTION mypg AS user = 'pguser', @@ -316,7 +338,7 @@ password = 'jw8s0F4', host = '127.0.0.1', port = 5432, database = 'test', -schema = 'test_schema', +schema = 'test_schema' ``` Example of configuration: @@ -369,6 +391,10 @@ SELECT * FROM mypgtable; └───┘ ``` +:::note +PostgreSQL copies data from the named collection when the table is being created. A change in the collection does not affect the existing tables.
+::: + ### Example of using named collections with database with engine PostgreSQL ```sql @@ -478,7 +504,7 @@ kafka_topic_list = 'kafka_topic', kafka_group_name = 'consumer_group', kafka_format = 'JSONEachRow', kafka_max_block_size = '1048576'; - + ``` ### XML example diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index a3bd919d3ce..a13aacc76e6 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1042,10 +1042,23 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%. This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values. High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting. -### deduplicate_merge_projection_mode + +## lightweight_mutation_projection_mode + +By default, lightweight `DELETE` does not work for tables with projections, because rows in a projection may be affected by a `DELETE` operation. Hence the default value is `throw`. +However, this option can change the behavior. With the value `drop` or `rebuild`, deletes will work with projections. `drop` deletes the projection, so the current query can be fast because the projection is simply removed, but future queries can be slow because no projection is attached anymore. +`rebuild` rebuilds the projection, which might affect the performance of the current query but can speed up future queries. Both options apply only at the part level, +which means projections in parts that are not touched stay intact instead of being dropped or rebuilt. + +Possible values: + +- throw, drop, rebuild + +Default value: throw + +## deduplicate_merge_projection_mode Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting. -It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members. +It also controls `OPTIMIZE DEDUPLICATE` as well, but has an effect on all MergeTree family members. Similar to the option `lightweight_mutation_projection_mode`, it is also applied at the part level. Possible values: diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index f8b40cd81ac..fcec0afb8d2 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -194,6 +194,17 @@ If enabled, ClickHouse will try to infer type `DateTime64` from string fields in Enabled by default. +## input_format_try_infer_variants {#input_format_try_infer_variants} + +If enabled, ClickHouse will try to infer type [`Variant`](../../sql-reference/data-types/variant.md) in schema inference for text formats when there is more than one possible type for column/array elements. + +Possible values: + +- 0 — Disabled. +- 1 — Enabled. + +Default value: `0`. + ## date_time_input_format {#date_time_input_format} Allows choosing a parser of the text representation of date and time.
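To make the new `input_format_try_infer_variants` setting above easier to picture, here is a minimal, hedged sketch; the sample data and the exact inferred type name are assumptions for illustration and may differ between ClickHouse versions:

```sql
-- Hedged sketch: with variant inference enabled, a field that holds both integers and
-- strings may be inferred as something like Variant(Int64, String) instead of falling
-- back to String.
SET input_format_try_infer_variants = 1;
DESC format(JSONEachRow, $$
    {"value" : 42}
    {"value" : "Hello, World!"}
    $$)
```

With the setting at its default of `0`, the same sample would typically be inferred as `String`.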
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 03ff6067a8f..19db4be17db 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1381,7 +1381,7 @@ Default value: `2`. Close connection before returning connection to the pool. -Default value: true. +Default value: false. ## odbc_bridge_connection_pool_size {#odbc-bridge-connection-pool-size} @@ -5620,6 +5620,19 @@ Minimal size of block to compress in CROSS JOIN. Zero value means - disable this Default value: `1GiB`. +## use_json_alias_for_old_object_type + +When enabled, the `JSON` data type alias will create the old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type. +This setting requires a server restart to take effect when changed. + +Default value: `false`. + +## type_json_skip_duplicated_paths + +When enabled, ClickHouse will skip duplicated paths during parsing of a [JSON](../../sql-reference/data-types/newjson.md) object. Only the value of the first occurrence of each path will be inserted. + +Default value: `false`. + ## restore_replace_external_engines_to_null For testing purposes. Replaces all external engines to Null to not initiate external connections. @@ -5654,3 +5667,9 @@ Possible values: - 1 — the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine is enabled. Default value: `0`. + +## create_if_not_exists + +Enable `IF NOT EXISTS` for the `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown. + +Default value: `false`. diff --git a/docs/en/operations/system-tables/view_refreshes.md b/docs/en/operations/system-tables/view_refreshes.md index 12377507b39..e792e0d095d 100644 --- a/docs/en/operations/system-tables/view_refreshes.md +++ b/docs/en/operations/system-tables/view_refreshes.md @@ -17,7 +17,8 @@ Columns: - `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took. - `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start. - `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled. -- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Exception'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace. +- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace. +- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry. - `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. - `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. diff --git a/docs/en/sql-reference/data-types/data-types-binary-encoding.md b/docs/en/sql-reference/data-types/data-types-binary-encoding.md index 812e946e43e..08fb664126a 100644 --- a/docs/en/sql-reference/data-types/data-types-binary-encoding.md +++ b/docs/en/sql-reference/data-types/data-types-binary-encoding.md @@ -12,57 +12,59 @@ This specification describes the binary format that can be used for binary encod The table below describes how each data type is represented in binary format. Each data type encoding consist of 1 byte that indicates the type and some optional additional information. `var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression. -| ClickHouse data type | Binary encoding | -|--------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Nothing` | `0x00` | -| `UInt8` | `0x01` | -| `UInt16` | `0x02` | -| `UInt32` | `0x03` | -| `UInt64` | `0x04` | -| `UInt128` | `0x05` | -| `UInt256` | `0x06` | -| `Int8` | `0x07` | -| `Int16` | `0x08` | -| `Int32` | `0x09` | -| `Int64` | `0x0A` | -| `Int128` | `0x0B` | -| `Int256` | `0x0C` | -| `Float32` | `0x0D` | -| `Float64` | `0x0E` | -| `Date` | `0x0F` | -| `Date32` | `0x10` | -| `DateTime` | `0x11` | -| `DateTime(time_zone)` | `0x12` | -| `DateTime64(P)` | `0x13` | -| `DateTime64(P, time_zone)` | `0x14` | -| `String` | `0x15` | -| `FixedString(N)` | `0x16` | -| `Enum8` | `0x17...` | -| `Enum16` | `0x18...>` | -| `Decimal32(P, S)` | `0x19` | -| `Decimal64(P, S)` | `0x1A` | -| `Decimal128(P, S)` | `0x1B` | -| `Decimal256(P, S)` | `0x1C` | -| `UUID` | `0x1D` | -| `Array(T)` | `0x1E` | -| `Tuple(T1, ..., TN)` | `0x1F...` | -| `Tuple(name1 T1, ..., nameN TN)` | `0x20...` | -| `Set` | `0x21` | -| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | -| `Nullable(T)` | `0x23` | -| `Function` | `0x24...` | -| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `LowCardinality(T)` | `0x26` | -| `Map(K, V)` | `0x27` | -| `IPv4` | `0x28` | -| `IPv6` | `0x29` | -| `Variant(T1, ..., TN)` | `0x2A...` | -| `Dynamic(max_types=N)` | `0x2B` | -| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | -| `Bool` | `0x2D` | -| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | -| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | +| ClickHouse data type | Binary encoding | 
+|-----------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Nothing` | `0x00` | +| `UInt8` | `0x01` | +| `UInt16` | `0x02` | +| `UInt32` | `0x03` | +| `UInt64` | `0x04` | +| `UInt128` | `0x05` | +| `UInt256` | `0x06` | +| `Int8` | `0x07` | +| `Int16` | `0x08` | +| `Int32` | `0x09` | +| `Int64` | `0x0A` | +| `Int128` | `0x0B` | +| `Int256` | `0x0C` | +| `Float32` | `0x0D` | +| `Float64` | `0x0E` | +| `Date` | `0x0F` | +| `Date32` | `0x10` | +| `DateTime` | `0x11` | +| `DateTime(time_zone)` | `0x12` | +| `DateTime64(P)` | `0x13` | +| `DateTime64(P, time_zone)` | `0x14` | +| `String` | `0x15` | +| `FixedString(N)` | `0x16` | +| `Enum8` | `0x17...` | +| `Enum16` | `0x18...>` | +| `Decimal32(P, S)` | `0x19` | +| `Decimal64(P, S)` | `0x1A` | +| `Decimal128(P, S)` | `0x1B` | +| `Decimal256(P, S)` | `0x1C` | +| `UUID` | `0x1D` | +| `Array(T)` | `0x1E` | +| `Tuple(T1, ..., TN)` | `0x1F...` | +| `Tuple(name1 T1, ..., nameN TN)` | `0x20...` | +| `Set` | `0x21` | +| `Interval` | `0x22` (see [interval kind binary encoding](#interval-kind-binary-encoding)) | +| `Nullable(T)` | `0x23` | +| `Function` | `0x24...` | +| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `LowCardinality(T)` | `0x26` | +| `Map(K, V)` | `0x27` | +| `IPv4` | `0x28` | +| `IPv6` | `0x29` | +| `Variant(T1, ..., TN)` | `0x2A...` | +| `Dynamic(max_types=N)` | `0x2B` | +| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C` | +| `Bool` | `0x2D` | +| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E......` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) | +| `Nested(name1 T1, ..., nameN TN)` | `0x2F...` | +| `JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp)` | `0x30.........` | +For type `JSON` byte `uint8_serialization_version` indicates the version of the serialization. Right now the version is always 0 but can change in future if new arguments will be introduced for `JSON` type. 
### Interval kind binary encoding diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index fcb0b60d022..2b89dd145e6 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -19,7 +19,8 @@ ClickHouse data types include: - **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) - **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) - **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time -- **JSON**: the [`JSON` object](./json.md) stores a JSON document in a single column +- **Object**: the [`Object`](./json.md) type stores a JSON document in a single column (deprecated) +- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column - **UUID**: a performant option for storing [`UUID` values](./uuid.md) - **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column - **Arrays**: any column can be defined as an [`Array` of values](./array.md) diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index f218c8d0339..e48b308a620 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -13,7 +13,7 @@ keywords: [object, data type] Stores JavaScript Object Notation (JSON) documents in a single column. -`JSON` is an alias for `Object('json')`. +`JSON` can be used as an alias for `Object('json')` when the setting [use_json_alias_for_old_object_type](../../operations/settings/settings.md#usejsonaliasforoldobjecttype) is enabled. ## Example @@ -79,5 +79,5 @@ SELECT * FROM json FORMAT JSONEachRow ## Related Content -- [Using JSON in ClickHouse](/docs/en/integrations/data-formats/json) +- [Using JSON in ClickHouse](/en/integrations/data-formats/json/overview) - [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md new file mode 100644 index 00000000000..9e43216df6c --- /dev/null +++ b/docs/en/sql-reference/data-types/newjson.md @@ -0,0 +1,516 @@ +--- +slug: /en/sql-reference/data-types/newjson +sidebar_position: 63 +sidebar_label: JSON +keywords: [json, data type] +--- + +# JSON + +Stores JavaScript Object Notation (JSON) documents in a single column. + +:::note +This feature is experimental and is not production-ready. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. +If you want to use the JSON type, set `allow_experimental_json_type = 1`. +::: + +To declare a column of `JSON` type, use the following syntax: + +``` sql + JSON(max_dynamic_paths=N, max_dynamic_types=M, some.path TypeName, SKIP path.to.skip, SKIP REGEXP 'paths_regexp') +``` +Where: +- `max_dynamic_paths` is an optional parameter indicating how many paths can be stored separately as subcolumns within a single block of data that is stored separately (for example, within a single data part of a MergeTree table). If this limit is exceeded, all other paths will be stored together in a single structure. The default value of `max_dynamic_paths` is `1024`.
+- `max_dynamic_types` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a single path column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_dynamic_types` is `32`. +- `some.path TypeName` is an optional type hint for particular path in the JSON. Such paths will be always stored as subcolumns with specified type. +- `SKIP path.to.skip` is an optional hint for particular path that should be skipped during JSON parsing. Such paths will never be stored in the JSON column. If specified path is a nested JSON object, the whole nested object will be skipped. +- `SKIP REGEXP 'path_regexp'` is an optional hint with a regular expression that is used to skip paths during JSON parsing. All paths that match this regular expression will never be stored in the JSON column. + +## Creating JSON + +Using `JSON` type in table column definition: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json────────────────────────────────────────┐ +│ {"a":{"b":"42"},"c":["1","2","3"]} │ +│ {"f":"Hello, World!"} │ +│ {"a":{"b":"43","e":"10"},"c":["4","5","6"]} │ +└─────────────────────────────────────────────┘ +``` + +```sql +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42}, "c" : [1, 2, 3]}'), ('{"f" : "Hello, World!"}'), ('{"a" : {"b" : 43, "e" : 10}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3]} │ +│ {"a":{"b":0},"f":"Hello, World!"} │ +│ {"a":{"b":43},"c":[4,5,6]} │ +└───────────────────────────────────┘ +``` + +Using CAST from 'String': + +```sql +SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; +``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. + +## Reading JSON paths as subcolumns + +JSON type supports reading every path as a separate subcolumn. If type of the requested path was not specified in the JSON type declaration, the subcolumn of the path will always have type [Dynamic](/docs/en/sql-reference/data-types/dynamic.md). 
+ +For example: + +```sql +CREATE TABLE test (json JSON(a.b UInt32, SKIP a.e)) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json──────────────────────────────────────────────────┐ +│ {"a":{"b":42,"g":42.42},"c":[1,2,3],"d":"2020-01-01"} │ +│ {"a":{"b":0},"d":"2020-01-02","f":"Hello, World!"} │ +│ {"a":{"b":43,"g":43.43},"c":[4,5,6]} │ +└───────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, json.a.g, json.c, json.d FROM test; +``` + +```text +┌─json.a.b─┬─json.a.g─┬─json.c──┬─json.d─────┐ +│ 42 │ 42.42 │ [1,2,3] │ 2020-01-01 │ +│ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-02 │ +│ 43 │ 43.43 │ [4,5,6] │ ᴺᵁᴸᴸ │ +└──────────┴──────────┴─────────┴────────────┘ +``` + +If the requested path wasn't found in the data, it will be filled with `NULL` values: + +```sql +SELECT json.non.existing.path FROM test; +``` + +```text +┌─json.non.existing.path─┐ +│ ᴺᵁᴸᴸ │ +│ ᴺᵁᴸᴸ │ +│ ᴺᵁᴸᴸ │ +└────────────────────────┘ +``` + +Let's check the data types of returned subcolumns: +```sql +SELECT toTypeName(json.a.b), toTypeName(json.a.g), toTypeName(json.c), toTypeName(json.d) FROM test; +``` + +```text +┌─toTypeName(json.a.b)─┬─toTypeName(json.a.g)─┬─toTypeName(json.c)─┬─toTypeName(json.d)─┐ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +│ UInt32 │ Dynamic │ Dynamic │ Dynamic │ +└──────────────────────┴──────────────────────┴────────────────────┴────────────────────┘ +``` + +As we can see, for `a.b` the type is `UInt32` as we specified in the JSON type declaration, and for all other subcolumns the type is `Dynamic`. + +It is also possible to read subcolumns of a `Dynamic` type using special syntax `json.some.path.:TypeName`: + +```sql +select json.a.g.:Float64, dynamicType(json.a.g), json.d.:Date, dynamicType(json.d) FROM test; +``` + +```text +┌─json.a.g.:`Float64`─┬─dynamicType(json.a.g)─┬─json.d.:`Date`─┬─dynamicType(json.d)─┐ +│ 42.42 │ Float64 │ 2020-01-01 │ Date │ +│ ᴺᵁᴸᴸ │ None │ 2020-01-02 │ Date │ +│ 43.43 │ Float64 │ ᴺᵁᴸᴸ │ None │ +└─────────────────────┴───────────────────────┴────────────────┴─────────────────────┘ +``` + +`Dynamic` subcolumns can be casted to any data type. In this case the exception will be thrown if internal type inside `Dynamic` cannot be casted to the requested type: + +```sql +select json.a.g::UInt64 as uint FROM test; +``` + +```text +┌─uint─┐ +│ 42 │ +│ 0 │ +│ 43 │ +└──────┘ +``` + +```sql +select json.a.g::UUID as float FROM test; +``` + +```text +Received exception: +Code: 48. DB::Exception: Conversion between numeric types and UUID is not supported. Probably the passed UUID is unquoted: while executing 'FUNCTION CAST(__table1.json.a.g :: 2, 'UUID'_String :: 1) -> CAST(__table1.json.a.g, 'UUID'_String) UUID : 0'. 
(NOT_IMPLEMENTED) +``` + +## Reading JSON sub-objects as subcolumns + +JSON type supports reading nested objects as subcolumns with type `JSON` using special syntax `json.^some.path`: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES ('{"a" : {"b" : {"c" : 42, "g" : 42.42}}, "c" : [1, 2, 3], "d" : {"e" : {"f" : {"g" : "Hello, World", "h" : [1, 2, 3]}}}}'), ('{"f" : "Hello, World!", "d" : {"e" : {"f" : {"h" : [4, 5, 6]}}}}'), ('{"a" : {"b" : {"c" : 43, "e" : 10, "g" : 43.43}}, "c" : [4, 5, 6]}'); +SELECT json FROM test; +``` + +```text +┌─json────────────────────────────────────────────────────────────────────────────────────────┐ +│ {"a":{"b":{"c":42,"g":42.42}},"c":[1,2,3],"d":{"e":{"f":{"g":"Hello, World","h":[1,2,3]}}}} │ +│ {"d":{"e":{"f":{"h":[4,5,6]}}},"f":"Hello, World!"} │ +│ {"a":{"b":{"c":43,"e":10,"g":43.43}},"c":[4,5,6]} │ +└─────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.^a.b, json.^d.e.f FROM test; +``` + +```text +┌─json.^`a`.b───────────────┬─json.^`d`.e.f────────────────────┐ +│ {"c":42,"g":42.42} │ {"g":"Hello, World","h":[1,2,3]} │ +│ {} │ {"h":[4,5,6]} │ +│ {"c":43,"e":10,"g":43.43} │ {} │ +└───────────────────────────┴──────────────────────────────────┘ +``` + +:::note +Reading sub-objects as subcolumns may be inefficient, as this may require almost full scan of the JSON data. +::: + +## Types inference for paths + +During JSON parsing ClickHouse tries to detect the most appropriate data type for each JSON path. It works similar to [automatic schema inference from input data](/docs/en/interfaces/schema-inference.md) and controlled by the same settings: + +- [input_format_try_infer_integers](/docs/en/interfaces/schema-inference.md#inputformattryinferintegers) +- [input_format_try_infer_dates](/docs/en/interfaces/schema-inference.md#inputformattryinferdates) +- [input_format_try_infer_datetimes](/docs/en/interfaces/schema-inference.md#inputformattryinferdatetimes) +- [schema_inference_make_columns_nullable](/docs/en/interfaces/schema-inference.md#schemainferencemakecolumnsnullable) +- [input_format_json_try_infer_numbers_from_strings](/docs/en/interfaces/schema-inference.md#inputformatjsontryinfernumbersfromstrings) +- [input_format_json_infer_incomplete_types_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsoninferincompletetypesasstrings) +- [input_format_json_read_numbers_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadnumbersasstrings) +- [input_format_json_read_bools_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasstrings) +- [input_format_json_read_bools_as_numbers](/docs/en/interfaces/schema-inference.md#inputformatjsonreadboolsasnumbers) +- [input_format_json_read_arrays_as_strings](/docs/en/interfaces/schema-inference.md#inputformatjsonreadarraysasstrings) + +Let's see some examples: + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=1, input_format_try_infer_datetimes=1; +``` + +```text +┌─paths_with_types─────────────────┐ +│ {'a':'Date','b':'DateTime64(9)'} │ +└──────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : "2020-01-01", "b" : "2020-01-01 10:00:00"}'::JSON) AS paths_with_types settings input_format_try_infer_dates=0, input_format_try_infer_datetimes=0; +``` + +```text +┌─paths_with_types────────────┐ +│ {'a':'String','b':'String'} │ 
+└─────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=1; +``` + +```text +┌─paths_with_types───────────────┐ +│ {'a':'Array(Nullable(Int64))'} │ +└────────────────────────────────┘ +``` + +```sql +SELECT JSONAllPathsWithTypes('{"a" : [1, 2, 3]}'::JSON) AS paths_with_types settings schema_inference_make_columns_nullable=0; +``` + +```text +┌─paths_with_types─────┐ +│ {'a':'Array(Int64)'} │ +└──────────────────────┘ +``` + +## Handling arrays of JSON objects + +JSON paths that contains an array of objects are parsed as type `Array(JSON)` and inserted into `Dynamic` column for this path. To read an array of objects you can extract it from `Dynamic` column as a subcolumn: + +```sql +CREATE TABLE test (json JSON) ENGINE = Memory; +INSERT INTO test VALUES +('{"a" : {"b" : [{"c" : 42, "d" : "Hello", "f" : [[{"g" : 42.42}]], "k" : {"j" : 1000}}, {"c" : 43}, {"e" : [1, 2, 3], "d" : "My", "f" : [[{"g" : 43.43, "h" : "2020-01-01"}]], "k" : {"j" : 2000}}]}}'), +('{"a" : {"b" : [1, 2, 3]}}'), +('{"a" : {"b" : [{"c" : 44, "f" : [[{"h" : "2020-01-02"}]]}, {"e" : [4, 5, 6], "d" : "World", "f" : [[{"g" : 44.44}]], "k" : {"j" : 3000}}]}}'); +SELECT json FROM test; +``` + +```text3 +┌─json────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +│ {"a":{"b":[{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}},{"c":"43"},{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}]}} │ +│ {"a":{"b":["1","2","3"]}} │ +│ {"a":{"b":[{"c":"44","f":[[{"h":"2020-01-02"}]]},{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}]}} │ +└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +```sql +SELECT json.a.b, dynamicType(json.a.b) FROM test; +``` + +```text +┌─json.a.b──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─dynamicType(json.a.b)────────────────────────────────────┐ +│ ['{"c":"42","d":"Hello","f":[[{"g":42.42}]],"k":{"j":"1000"}}','{"c":"43"}','{"d":"My","e":["1","2","3"],"f":[[{"g":43.43,"h":"2020-01-01"}]],"k":{"j":"2000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +│ [1,2,3] │ Array(Nullable(Int64)) │ +│ ['{"c":"44","f":[[{"h":"2020-01-02"}]]}','{"d":"World","e":["4","5","6"],"f":[[{"g":44.44}]],"k":{"j":"3000"}}'] │ Array(JSON(max_dynamic_types=16, max_dynamic_paths=256)) │ +└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────┘ +``` + +As you can notice, the `max_dynamic_types/max_dynamic_paths` parameters of the nested `JSON` type were reduced compared to the default values. It's needed to avoid number of subcolumns to grow uncontrolled on nested arrays of JSON objects. 
+ +Let's try to read subcolumns from this nested `JSON` column: + +```sql +SELECT json.a.b.:`Array(JSON)`.c, json.a.b.:`Array(JSON)`.f, json.a.b.:`Array(JSON)`.d FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┐ +│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ +``` + +We can avoid writing `Array(JSON)` subcolumn name using special syntax: + +```sql +SELECT json.a.b[].c, json.a.b[].f, json.a.b[].d FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c─┬─json.a.b.:`Array(JSON)`.f───────────────────────────────────┬─json.a.b.:`Array(JSON)`.d─┐ +│ [42,43,NULL] │ [[['{"g":42.42}']],NULL,[['{"g":43.43,"h":"2020-01-01"}']]] │ ['Hello',NULL,'My'] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[['{"h":"2020-01-02"}']],[['{"g":44.44}']]] │ [NULL,'World'] │ +└───────────────────────────┴─────────────────────────────────────────────────────────────┴───────────────────────────┘ +``` + +The number of `[]` after path indicates the array level. `json.path[][]` will be transformed to `json.path.:Array(Array(JSON))` + +Let's check the paths and types inside our `Array(JSON)`: + +```sql +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b[]))) FROM test; +``` + +```text +┌─arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b.:`Array(JSON)`)))──┐ +│ ('c','Int64') │ +│ ('d','String') │ +│ ('f','Array(Array(JSON(max_dynamic_types=8, max_dynamic_paths=64)))') │ +│ ('k.j','Int64') │ +│ ('e','Array(Nullable(Int64))') │ +└───────────────────────────────────────────────────────────────────────┘ +``` + +Let's read subcolumns from `Array(JSON)` column: + +```sql +SELECT json.a.b[].c.:Int64, json.a.b[].f[][].g.:Float64, json.a.b[].f[][].h.:Date FROM test; +``` + +```text +┌─json.a.b.:`Array(JSON)`.c.:`Int64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.g.:`Float64`─┬─json.a.b.:`Array(JSON)`.f.:`Array(Array(JSON))`.h.:`Date`─┐ +│ [42,43,NULL] │ [[[42.42]],[],[[43.43]]] │ [[[NULL]],[],[['2020-01-01']]] │ +│ [] │ [] │ [] │ +│ [44,NULL] │ [[[NULL]],[[44.44]]] │ [[['2020-01-02']],[[NULL]]] │ +└────────────────────────────────────┴──────────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘ +``` + +We can also read sub-object subcolumns from nested `JSON` column: + +```sql +SELECT json.a.b[].^k FROM test +``` + +```text +┌─json.a.b.:`Array(JSON)`.^`k`─────────┐ +│ ['{"j":"1000"}','{}','{"j":"2000"}'] │ +│ [] │ +│ ['{}','{"j":"3000"}'] │ +└──────────────────────────────────────┘ +``` + +## Reading JSON type from the data + +All text formats (JSONEachRow, TSV, CSV, CustomSeparated, Values, etc) supports reading `JSON` type. 
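+
+Before the format examples below, here is a minimal illustration (not part of the original example set; the literal value is arbitrary) of the `::JSON` cast used earlier on this page, which is often the quickest way to try the type out:
+
+```sql
+-- Cast an ad-hoc string literal to JSON and inspect the inferred paths and types.
+SELECT '{"a" : {"b" : 1}, "c" : "2020-01-01"}'::JSON AS json, JSONAllPathsWithTypes(json) AS paths;
+```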
+
+Examples:
+
+```sql
+SELECT json FROM format(JSONEachRow, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP d.e, SKIP REGEXP \'b.*\')', '
+{"json" : {"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}}
+{"json" : {"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}}
+{"json" : {"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}}
+{"json" : {"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}}
+{"json" : {"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}}
+')
+```
+
+```text
+┌─json──────────────────────────────────────────────────────────┐
+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}}          │
+│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}}                   │
+│ {"a":{"b":{"c":3}},"e":"Hello, World!"}                       │
+│ {"a":{"b":{"c":4}},"c":"43"}                                  │
+│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │
+└───────────────────────────────────────────────────────────────┘
+```
+
+For text formats like CSV/TSV, the `JSON` value is parsed from a string containing a JSON object:
+
+```sql
+SELECT json FROM format(TSV, 'json JSON(a.b.c UInt32, SKIP a.b.d, SKIP REGEXP \'b.*\')',
+'{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}, "i" : [1, 2, 3]}}
+{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}, "i" : [4, 5, 6]}}
+{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "b" : {"c" : 10}, "e" : "Hello, World!"}
+{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43}
+{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : {"c" : 11, "j" : [1, 2, 3]}, "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44}, "h" : "2020-02-02 10:00:00"}}')
+```
+
+```text
+┌─json──────────────────────────────────────────────────────────┐
+│ {"a":{"b":{"c":1}},"c":"42","d":{"i":["1","2","3"]}}          │
+│ {"a":{"b":{"c":2}},"d":{"i":["4","5","6"]}}                   │
+│ {"a":{"b":{"c":3}},"e":"Hello, World!"}                       │
+│ {"a":{"b":{"c":4}},"c":"43"}                                  │
+│ {"a":{"b":{"c":5}},"d":{"h":"2020-02-02 10:00:00.000000000"}} │
+└───────────────────────────────────────────────────────────────┘
+```
+
+## Reaching the limit of dynamic paths inside JSON
+
+The `JSON` data type can store only a limited number of paths as separate subcolumns. By default, this limit is 1024, but you can change it in the type declaration using the `max_dynamic_paths` parameter.
+When the limit is reached, all new paths inserted into a `JSON` column are stored in a single shared data structure. Such paths can still be read as subcolumns, but doing so requires reading the whole
+shared data structure to extract the values of the path. The limit is needed to avoid an enormous number of different subcolumns, which could make the table unusable.
+
+Let's see what happens when the limit is reached in different scenarios.
+
+### Reaching the limit during data parsing
+
+During parsing of `JSON` objects from the data, when the limit is reached for the current block of data, all new paths are stored in a shared data structure.
We can check this using the introspection functions `JSONDynamicPaths` and `JSONSharedDataPaths`:
+
+```sql
+SELECT json, JSONDynamicPaths(json), JSONSharedDataPaths(json) FROM format(JSONEachRow, 'json JSON(max_dynamic_paths=3)', '
+{"json" : {"a" : {"b" : 42}, "c" : [1, 2, 3]}}
+{"json" : {"a" : {"b" : 43}, "d" : "2020-01-01"}}
+{"json" : {"a" : {"b" : 44}, "c" : [4, 5, 6]}}
+{"json" : {"a" : {"b" : 43}, "d" : "2020-01-02", "e" : "Hello", "f" : {"g" : 42.42}}}
+{"json" : {"a" : {"b" : 43}, "c" : [7, 8, 9], "f" : {"g" : 43.43}, "h" : "World"}}
+')
+```
+
+```text
+┌─json───────────────────────────────────────────────────────────┬─JSONDynamicPaths(json)─┬─JSONSharedDataPaths(json)─┐
+│ {"a":{"b":"42"},"c":["1","2","3"]}                             │ ['a.b','c','d']        │ []                        │
+│ {"a":{"b":"43"},"d":"2020-01-01"}                              │ ['a.b','c','d']        │ []                        │
+│ {"a":{"b":"44"},"c":["4","5","6"]}                             │ ['a.b','c','d']        │ []                        │
+│ {"a":{"b":"43"},"d":"2020-01-02","e":"Hello","f":{"g":42.42}}  │ ['a.b','c','d']        │ ['e','f.g']               │
+│ {"a":{"b":"43"},"c":["7","8","9"],"f":{"g":43.43},"h":"World"} │ ['a.b','c','d']        │ ['f.g','h']               │
+└────────────────────────────────────────────────────────────────┴────────────────────────┴───────────────────────────┘
+```
+
+As we can see, the limit was reached when the paths `e` and `f.g` were inserted, so they were stored in the shared data structure.
+
+### During merges of data parts in MergeTree table engines
+
+During a merge of several data parts in a MergeTree table, the `JSON` column in the resulting data part can reach the limit of dynamic paths and won't be able to store all paths from the source parts as subcolumns.
+In this case ClickHouse chooses which paths will remain as subcolumns after the merge and which paths will be stored in the shared data structure. In most cases ClickHouse tries to keep the paths that contain
+the largest number of non-null values and move the rarest paths to the shared data structure, but this depends on the implementation.
+
+Let's look at an example of such a merge.
First, let's create a table with a `JSON` column, set the limit of dynamic paths to `3`, and insert values with `5` different paths:
+
+```sql
+CREATE TABLE test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree ORDER BY id;
+SYSTEM STOP MERGES test;
+INSERT INTO test SELECT number, formatRow('JSONEachRow', number as a) FROM numbers(5);
+INSERT INTO test SELECT number, formatRow('JSONEachRow', number as b) FROM numbers(4);
+INSERT INTO test SELECT number, formatRow('JSONEachRow', number as c) FROM numbers(3);
+INSERT INTO test SELECT number, formatRow('JSONEachRow', number as d) FROM numbers(2);
+INSERT INTO test SELECT number, formatRow('JSONEachRow', number as e) FROM numbers(1);
+```
+
+Each insert creates a separate data part with a `JSON` column containing a single path:
+```sql
+SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part ASC
+```
+
+```text
+┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┐
+│       5 │ ['a']         │ []                │ all_1_1_0 │
+│       4 │ ['b']         │ []                │ all_2_2_0 │
+│       3 │ ['c']         │ []                │ all_3_3_0 │
+│       2 │ ['d']         │ []                │ all_4_4_0 │
+│       1 │ ['e']         │ []                │ all_5_5_0 │
+└─────────┴───────────────┴───────────────────┴───────────┘
+```
+
+Now, let's merge all parts into one and see what happens:
+
+```sql
+SYSTEM START MERGES test;
+OPTIMIZE TABLE test FINAL;
+SELECT count(), JSONDynamicPaths(json) AS dynamic_paths, JSONSharedDataPaths(json) AS shared_data_paths, _part FROM test GROUP BY _part, dynamic_paths, shared_data_paths ORDER BY _part;
+```
+
+```text
+┌─count()─┬─dynamic_paths─┬─shared_data_paths─┬─_part─────┐
+│       1 │ ['a','b','c'] │ ['e']             │ all_1_5_2 │
+│       2 │ ['a','b','c'] │ ['d']             │ all_1_5_2 │
+│      12 │ ['a','b','c'] │ []                │ all_1_5_2 │
+└─────────┴───────────────┴───────────────────┴───────────┘
+```
+
+As we can see, ClickHouse kept the most frequent paths `a`, `b` and `c` and moved paths `d` and `e` to the shared data structure.
+
+## Introspection functions
+
+There are several functions that can help to inspect the content of a JSON column: [JSONAllPaths](../functions/json-functions.md#jsonallpaths), [JSONAllPathsWithTypes](../functions/json-functions.md#jsonallpathswithtypes), [JSONDynamicPaths](../functions/json-functions.md#jsondynamicpaths), [JSONDynamicPathsWithTypes](../functions/json-functions.md#jsondynamicpathswithtypes), [JSONSharedDataPaths](../functions/json-functions.md#jsonshareddatapaths), [JSONSharedDataPathsWithTypes](../functions/json-functions.md#jsonshareddatapathswithtypes).
+
+## Tips for better usage of the JSON type
+
+Before creating a `JSON` column and loading data into it, consider the following tips:
+
+- Investigate your data and specify as many path hints with types as you can. It will make the storage and the reading much more efficient.
+- Think about which paths you will need and which paths you will never need. Specify the paths you won't need in the SKIP section, and use SKIP REGEXP if needed. It will improve the storage. See the sketch after this list.
+- Don't set the `max_dynamic_paths` parameter to very high values, as it can make the storage and reading less efficient.
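+
+For illustration, here is a minimal sketch of a table definition applying the first two tips. The paths, types, and the regular expression are hypothetical; the hint and `SKIP`/`SKIP REGEXP` syntax is the one shown in the parsing examples above:
+
+```sql
+CREATE TABLE logs
+(
+    json JSON(
+        timestamp DateTime64(3),       -- frequently queried path with a fixed type
+        user.id UInt64,                -- another typed path hint
+        SKIP debug.trace,              -- a path we never query
+        SKIP REGEXP '^internal\\..*'   -- skip a whole family of paths
+    )
+)
+ENGINE = MergeTree ORDER BY tuple();
+```
+
+With hints like these, the frequently used paths get fixed types, while paths matched by the `SKIP` clauses are not stored at all.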
diff --git a/docs/en/sql-reference/functions/json-functions.md b/docs/en/sql-reference/functions/json-functions.md index 7bff6a6cba5..26fe888ab49 100644 --- a/docs/en/sql-reference/functions/json-functions.md +++ b/docs/en/sql-reference/functions/json-functions.md @@ -1155,3 +1155,207 @@ SELECT jsonMergePatch('{"a":1}', '{"name": "joey"}', '{"name": "tom"}', '{"name" │ {"a":1,"name":"zoey"} │ └───────────────────────┘ ``` + +### JSONAllPaths + +Returns the list of all paths stored in each row in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONAllPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONAllPaths(json)─┐ +│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a','c'] │ +└──────────────────────────────────────┴────────────────────┘ +``` + +### JSONAllPathsWithTypes + +Returns the map of all paths and their data types stored in each row in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONAllPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONAllPathsWithTypes(json)───────────────┐ +│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))','c':'Date'} │ +└──────────────────────────────────────┴───────────────────────────────────────────┘ +``` + +### JSONDynamicPaths + +Returns the list of dynamic paths that are stored as separate subcolumns in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONDynamicPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONDynamicPaths(json)─┐ +| {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ [] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a'] │ +└──────────────────────────────────────┴────────────────────────┘ +``` + +### JSONDynamicPathsWithTypes + +Returns the map of dynamic paths that are stored as separate subcolumns and their types in each row in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONAllPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). 
+ +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┐ +│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +``` + +### JSONSharedDataPaths + +Returns the list of paths that are stored in shared data structure in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONSharedDataPaths(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Array(String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPaths(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONSharedDataPaths(json)─┐ +│ {"a":"42"} │ [] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['c'] │ +└──────────────────────────────────────┴───────────────────────────┘ +``` + +### JSONSharedDataPathsWithTypes + +Returns the map of paths that are stored in shared data structure and their types in each row in [JSON](../data-types/newjson.md) column. + +**Syntax** + +``` sql +JSONSharedDataPathsWithTypes(json) +``` + +**Arguments** + +- `json` — [JSON](../data-types/newjson.md). + +**Returned value** + +- An array of paths. [Map(String, String)](../data-types/array.md). + +**Example** + +``` sql +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPathsWithTypes(json) FROM test; +``` + +```text +┌─json─────────────────────────────────┬─JSONSharedDataPathsWithTypes(json)─┐ +│ {"a":"42"} │ {} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'c':'Date'} │ +└──────────────────────────────────────┴────────────────────────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index 9e4b39e95e4..edd04580f27 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -6103,30 +6103,23 @@ Result: └───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘ ``` -## toInterval(Year\|Quarter\|Month\|Week\|Day\|Hour\|Minute\|Second) +## toIntervalYear -Converts a Number type argument to an [Interval](../data-types/special-data-types/interval.md) data type. +Returns an interval of `n` years of data type [IntervalYear](../data-types/special-data-types/interval.md). **Syntax** ``` sql -toIntervalSecond(number) -toIntervalMinute(number) -toIntervalHour(number) -toIntervalDay(number) -toIntervalWeek(number) -toIntervalMonth(number) -toIntervalQuarter(number) -toIntervalYear(number) +toIntervalYear(n) ``` **Arguments** -- `number` — Duration of interval. Positive integer number. +- `n` — Number of years. 
Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). **Returned values** -- The value in `Interval` data type. +- Interval of `n` years. [IntervalYear](../data-types/special-data-types/interval.md). **Example** @@ -6134,20 +6127,387 @@ Query: ``` sql WITH - toDate('2019-01-01') AS date, - INTERVAL 1 WEEK AS interval_week, - toIntervalWeek(1) AS interval_to_week -SELECT - date + interval_week, - date + interval_to_week; + toDate('2024-06-15') AS date, + toIntervalYear(1) AS interval_to_year +SELECT date + interval_to_year AS result ``` Result: ```response -┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐ -│ 2019-01-08 │ 2019-01-08 │ -└───────────────────────────┴──────────────────────────────┘ +┌─────result─┐ +│ 2025-06-15 │ +└────────────┘ +``` + +## toIntervalQuarter + +Returns an interval of `n` quarters of data type [IntervalQuarter](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalQuarter(n) +``` + +**Arguments** + +- `n` — Number of quarters. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` quarters. [IntervalQuarter](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalQuarter(1) AS interval_to_quarter +SELECT date + interval_to_quarter AS result +``` + +Result: + +```response +┌─────result─┐ +│ 2024-09-15 │ +└────────────┘ +``` + +## toIntervalMonth + +Returns an interval of `n` months of data type [IntervalMonth](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalMonth(n) +``` + +**Arguments** + +- `n` — Number of months. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` months. [IntervalMonth](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalMonth(1) AS interval_to_month +SELECT date + interval_to_month AS result +``` + +Result: + +```response +┌─────result─┐ +│ 2024-07-15 │ +└────────────┘ +``` + +## toIntervalWeek + +Returns an interval of `n` weeks of data type [IntervalWeek](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalWeek(n) +``` + +**Arguments** + +- `n` — Number of weeks. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` weeks. [IntervalWeek](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalWeek(1) AS interval_to_week +SELECT date + interval_to_week AS result +``` + +Result: + +```response +┌─────result─┐ +│ 2024-06-22 │ +└────────────┘ +``` + +## toIntervalDay + +Returns an interval of `n` days of data type [IntervalDay](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalDay(n) +``` + +**Arguments** + +- `n` — Number of days. Integer numbers or string representations thereof, and float numbers. 
[(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` days. [IntervalDay](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalDay(5) AS interval_to_days +SELECT date + interval_to_days AS result +``` + +Result: + +```response +┌─────result─┐ +│ 2024-06-20 │ +└────────────┘ +``` + +## toIntervalHour + +Returns an interval of `n` hours of data type [IntervalHour](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalHour(n) +``` + +**Arguments** + +- `n` — Number of hours. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` hours. [IntervalHour](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalHour(12) AS interval_to_hours +SELECT date + interval_to_hours AS result +``` + +Result: + +```response +┌──────────────result─┐ +│ 2024-06-15 12:00:00 │ +└─────────────────────┘ +``` + +## toIntervalMinute + +Returns an interval of `n` minutes of data type [IntervalMinute](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalMinute(n) +``` + +**Arguments** + +- `n` — Number of minutes. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` minutes. [IntervalMinute](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalMinute(12) AS interval_to_minutes +SELECT date + interval_to_minutes AS result +``` + +Result: + +```response +┌──────────────result─┐ +│ 2024-06-15 00:12:00 │ +└─────────────────────┘ +``` + +## toIntervalSecond + +Returns an interval of `n` seconds of data type [IntervalSecond](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalSecond(n) +``` + +**Arguments** + +- `n` — Number of seconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` seconds. [IntervalSecond](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDate('2024-06-15') AS date, + toIntervalSecond(30) AS interval_to_seconds +SELECT date + interval_to_seconds AS result +``` + +Result: + +```response +┌──────────────result─┐ +│ 2024-06-15 00:00:30 │ +└─────────────────────┘ +``` + +## toIntervalMillisecond + +Returns an interval of `n` milliseconds of data type [IntervalMillisecond](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalMillisecond(n) +``` + +**Arguments** + +- `n` — Number of milliseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` milliseconds. [IntervalMilliseconds](../data-types/special-data-types/interval.md). 
+ +**Example** + +Query: + +``` sql +WITH + toDateTime('2024-06-15') AS date, + toIntervalMillisecond(30) AS interval_to_milliseconds +SELECT date + interval_to_milliseconds AS result +``` + +Result: + +```response +┌──────────────────result─┐ +│ 2024-06-15 00:00:00.030 │ +└─────────────────────────┘ +``` + +## toIntervalMicrosecond + +Returns an interval of `n` microseconds of data type [IntervalMicrosecond](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalMicrosecond(n) +``` + +**Arguments** + +- `n` — Number of microseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` microseconds. [IntervalMicrosecond](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDateTime('2024-06-15') AS date, + toIntervalMicrosecond(30) AS interval_to_microseconds +SELECT date + interval_to_microseconds AS result +``` + +Result: + +```response +┌─────────────────────result─┐ +│ 2024-06-15 00:00:00.000030 │ +└────────────────────────────┘ +``` + +## toIntervalNanosecond + +Returns an interval of `n` nanoseconds of data type [IntervalNanosecond](../data-types/special-data-types/interval.md). + +**Syntax** + +``` sql +toIntervalNanosecond(n) +``` + +**Arguments** + +- `n` — Number of nanoseconds. Integer numbers or string representations thereof, and float numbers. [(U)Int*](../data-types/int-uint.md)/[Float*](../data-types/float.md)/[String](../data-types/string.md). + +**Returned values** + +- Interval of `n` nanoseconds. [IntervalNanosecond](../data-types/special-data-types/interval.md). + +**Example** + +Query: + +``` sql +WITH + toDateTime('2024-06-15') AS date, + toIntervalNanosecond(30) AS interval_to_nanoseconds +SELECT date + interval_to_nanoseconds AS result +``` + +Result: + +```response +┌────────────────────────result─┐ +│ 2024-06-15 00:00:00.000000030 │ +└───────────────────────────────┘ ``` ## parseDateTime diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 2931f7020fb..45e7a41e8a2 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -13,8 +13,8 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia Syntax: ``` sql -CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] -[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... [COMMENT 'comment'] ``` @@ -55,8 +55,8 @@ SELECT * FROM view(column1=value1, column2=value2 ...) ## Materialized View ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] -[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] AS SELECT ... [COMMENT 'comment'] ``` @@ -92,7 +92,7 @@ Given that `POPULATE` works like `CREATE TABLE ... 
AS SELECT ...` it has limitat - It is not supported with Replicated database - It is not supported in ClickHouse cloud -Instead a separate `INSERT ... SELECT` can be used. +Instead a separate `INSERT ... SELECT` can be used. ::: A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`. @@ -110,7 +110,7 @@ To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop `DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query. `SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause. -The following table will explain which rights are required for which user in order to select from view. +The following table will explain which rights are required for which user in order to select from view. Note that regardless of the SQL security option, in every case it is still required to have `GRANT SELECT ON ` in order to read from it. | SQL security option | View | Materialized View | @@ -130,7 +130,7 @@ If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used: If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view. -To change SQL security for an existing view, use +To change SQL security for an existing view, use ```sql ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }] ``` @@ -161,6 +161,8 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name REFRESH EVERY|AFTER interval [OFFSET interval] RANDOMIZE FOR interval DEPENDS ON [db.]name [, [db.]name [, ...]] +SETTINGS name = value [, name = value [, ...]] +[APPEND] [TO[db.]name] [(columns)] [ENGINE = engine] [EMPTY] AS SELECT ... [COMMENT 'comment'] @@ -170,18 +172,23 @@ where `interval` is a sequence of simple intervals: number SECOND|MINUTE|HOUR|DAY|WEEK|MONTH|YEAR ``` -Periodically runs the corresponding query and stores its result in a table, atomically replacing the table's previous contents. +Periodically runs the corresponding query and stores its result in a table. + * If the query says `APPEND`, each refresh inserts rows into the table without deleting existing rows. The insert is not atomic, just like a regular INSERT SELECT. + * Otherwise each refresh atomically replaces the table's previous contents. Differences from regular non-refreshable materialized views: - * No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. The periodic refresh runs the entire query and replaces the entire table. + * No insert trigger. I.e. when new data is inserted into the table specified in SELECT, it's *not* automatically pushed to the refreshable materialized view. The periodic refresh runs the entire query. * No restrictions on the SELECT query. Table functions (e.g. `url()`), views, UNION, JOIN, are all allowed. +:::note +The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. 
`refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query. +::: + :::note Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations: * not compatible with Replicated database or table engines * It is not supported in ClickHouse Cloud * require [Atomic database engine](../../../engines/database-engines/atomic.md), - * no retries for failed refresh - we just skip to the next scheduled refresh time, * no limit on number of concurrent refreshes. ::: @@ -246,15 +253,22 @@ A few more examples: `DEPENDS ON` only works between refreshable materialized views. Listing a regular table in the `DEPENDS ON` list will prevent the view from ever refreshing (dependencies can be removed with `ALTER`, see below). ::: +### Settings + +Available refresh settings: + * `refresh_retries` - How many times to retry if refresh query fails with an exception. If all retries fail, skip to the next scheduled refresh time. 0 means no retries, -1 means infinite retries. Default: 0. + * `refresh_retry_initial_backoff_ms` - Delay before the first retry, if `refresh_retries` is not zero. Each subsequent retry doubles the delay, up to `refresh_retry_max_backoff_ms`. Default: 100 ms. + * `refresh_retry_max_backoff_ms` - Limit on the exponential growth of delay between refresh attempts. Default: 60000 ms (1 minute). + ### Changing Refresh Parameters {#changing-refresh-parameters} To change refresh parameters: ``` -ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] +ALTER TABLE [db.]name MODIFY REFRESH EVERY|AFTER ... [RANDOMIZE FOR ...] [DEPENDS ON ...] [SETTINGS ...] ``` :::note -This replaces refresh schedule *and* dependencies. If the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies. +This replaces *all* refresh parameters at once: schedule, dependencies, settings, and APPEND-ness. E.g. if the table had a `DEPENDS ON`, doing a `MODIFY REFRESH` without `DEPENDS ON` will remove the dependencies. ::: ### Other operations @@ -263,6 +277,10 @@ The status of all refreshable materialized views is available in table [`system. To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views). +:::note +Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA== +::: + ## Window View [Experimental] :::info diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md index 88a9c933519..78142f880fe 100644 --- a/docs/en/sql-reference/statements/delete.md +++ b/docs/en/sql-reference/statements/delete.md @@ -38,8 +38,7 @@ If you anticipate frequent deletes, consider using a [custom partitioning key](/ ### Lightweight `DELETE`s with projections -By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation and may require the projection to be rebuilt, negatively affecting `DELETE` performance. -However, there is an option to change this behavior. By changing setting `lightweight_mutation_projection_mode = 'drop'`, deletes will work with projections. 
+By default, `DELETE` does not work for tables with projections. This is because rows in a projection may be affected by a `DELETE` operation. But there is a [MergeTree setting](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings) `lightweight_mutation_projection_mode` can change the behavior. ## Performance considerations when using lightweight `DELETE` diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index 43fa344a16d..6118f4c1d36 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -200,6 +200,7 @@ Hierarchy of privileges: - `JDBC` - `HDFS` - `S3` + - `POSTGRES` - [dictGet](#dictget) - [displaySecretsInShowAndSelect](#displaysecretsinshowandselect) - [NAMED COLLECTION ADMIN](#named-collection-admin) @@ -476,6 +477,7 @@ Allows using external data sources. Applies to [table engines](../../engines/tab - `JDBC`. Level: `GLOBAL` - `HDFS`. Level: `GLOBAL` - `S3`. Level: `GLOBAL` + - `POSTGRES`. Level: `GLOBAL` The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges. diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 35f2f15dd80..3ebcf617491 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -400,7 +400,7 @@ SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_ After running this statement the `[db.]replicated_merge_tree_family_table_name` fetches commands from the common replicated log into its own replication queue, and then the query waits till the replica processes all of the fetched commands. The following modifiers are supported: - If a `STRICT` modifier was specified then the query waits for the replication queue to become empty. The `STRICT` version may never succeed if new entries constantly appear in the replication queue. - - If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed. + - If a `LIGHTWEIGHT` modifier was specified then the query waits only for `GET_PART`, `ATTACH_PART`, `DROP_RANGE`, `REPLACE_RANGE` and `DROP_PART` entries to be processed. Additionally, the LIGHTWEIGHT modifier supports an optional FROM 'srcReplicas' clause, where 'srcReplicas' is a comma-separated list of source replica names. This extension allows for more targeted synchronization by focusing only on replication tasks originating from the specified source replicas. - If a `PULL` modifier was specified then the query pulls new replication queue entries from ZooKeeper, but does not wait for anything to be processed. @@ -526,6 +526,10 @@ Trigger an immediate out-of-schedule refresh of a given view. SYSTEM REFRESH VIEW [db.]name ``` +### REFRESH VIEW + +Wait for the currently running refresh to complete. If the refresh fails, throws an exception. If no refresh is running, completes immediately, throwing an exception if previous refresh failed. + ### STOP VIEW, STOP VIEWS Disable periodic refreshing of the given view or all refreshable views. If a refresh is in progress, cancel it too. 
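+
+For illustration (a hedged sketch, not part of the original patch; `db.hits_view` is a hypothetical refreshable materialized view), the refreshable-view statements documented above are typically used together like this:
+
+```sql
+SYSTEM REFRESH VIEW db.hits_view;  -- trigger an immediate out-of-schedule refresh
+SYSTEM STOP VIEW db.hits_view;     -- pause periodic refreshes (cancels a running one)
+SYSTEM START VIEW db.hits_view;    -- resume periodic refreshes
+```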
diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index f96e48d914d..30d2e371c7e 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -116,7 +116,7 @@ SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parque ## Storage Settings {#storage-settings} - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. -- [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. +- [hdfs_create_new_file_on_insert](/docs/en/operations/settings/settings.md#hdfs_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default. - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default. - [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs. diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 1bd9f38517e..181c92b92d4 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -290,7 +290,7 @@ SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. -- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default. +- [s3_create_new_file_on_insert](/docs/en/operations/settings/settings.md#s3_create_new_file_on_insert) - allows to create a new file on each insert if format has suffix. Disabled by default. - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default. **See Also** diff --git a/docs/ru/operations/named-collections.md b/docs/ru/operations/named-collections.md index 48ee7c9f15d..67656f24ba3 100644 --- a/docs/ru/operations/named-collections.md +++ b/docs/ru/operations/named-collections.md @@ -146,7 +146,30 @@ SELECT dictGet('dict', 'B', 2); ## Пример использования именованных соединений с базой данных PostgreSQL -Описание параметров смотрите [postgresql](../sql-reference/table-functions/postgresql.md). +Описание параметров смотрите [postgresql](../sql-reference/table-functions/postgresql.md). Дополнительно есть алиасы: +- `username` для `user` +- `db` для `database`. + +Параметр `addresses_expr` используется в коллекции вместо `host:port`. Параметр опционален, потому что есть так же другие: `host`, `hostname`, `port`. 
Следующий псевдокод показывает приоритет: + +```sql +CASE + WHEN collection['addresses_expr'] != '' THEN collection['addresses_expr'] + WHEN collection['host'] != '' THEN collection['host'] || ':' || if(collection['port'] != '', collection['port'], '5432') + WHEN collection['hostname'] != '' THEN collection['hostname'] || ':' || if(collection['port'] != '', collection['port'], '5432') +END +``` + +Пример создания: +```sql +CREATE NAMED COLLECTION mypg AS +user = 'pguser', +password = 'jw8s0F4', +host = '127.0.0.1', +port = 5432, +database = 'test', +schema = 'test_schema' +``` Пример конфигурации: ```xml @@ -199,6 +222,10 @@ SELECT * FROM mypgtable; └───┘ ``` +:::note +PostgreSQL копирует данные из named collection при создании таблицы. Изменения в коллекции не влияют на существующие таблицы. +::: + ### Пример использования именованных соединений базой данных с движком PostgreSQL ```sql diff --git a/programs/bash-completion/completions/CMakeLists.txt b/programs/bash-completion/completions/CMakeLists.txt index d364e07ef6e..2e911e81981 100644 --- a/programs/bash-completion/completions/CMakeLists.txt +++ b/programs/bash-completion/completions/CMakeLists.txt @@ -6,6 +6,7 @@ macro(configure_bash_completion) COMMAND ${PKG_CONFIG_BIN} --variable=completionsdir bash-completion OUTPUT_VARIABLE ${out} OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND_ERROR_IS_FATAL ANY ) endif() string(REPLACE /usr "${CMAKE_INSTALL_PREFIX}" out "${out}") diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index 48dca82eb2b..36f774a3c12 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -75,6 +75,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & stage, bool randomize_, @@ -128,7 +130,9 @@ public: connections.emplace_back(std::make_unique( concurrency, cur_host, cur_port, - default_database_, user_, password_, quota_key_, + default_database_, user_, password_, + proto_send_chunked_, proto_recv_chunked_, + quota_key_, /* cluster_= */ "", /* cluster_secret_= */ "", /* client_name_= */ std::string(DEFAULT_CLIENT_NAME), @@ -662,6 +666,50 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) Strings hosts = options.count("host") ? 
options["host"].as() : Strings({"localhost"}); + String proto_send_chunked {"notchunked"}; + String proto_recv_chunked {"notchunked"}; + + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + proto_send_chunked = cap_str; + proto_recv_chunked = cap_str; + } + else + { + if (direction == "send") + proto_send_chunked = cap_str; + else + proto_recv_chunked = cap_str; + } + } + } + + Benchmark benchmark( options["concurrency"].as(), options["delay"].as(), @@ -673,6 +721,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) options["database"].as(), options["user"].as(), options["password"].as(), + proto_send_chunked, + proto_recv_chunked, options["quota_key"].as(), options["stage"].as(), options.count("randomize"), diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml index c32b63413e9..6eb8976a6ef 100644 --- a/programs/client/clickhouse-client.xml +++ b/programs/client/clickhouse-client.xml @@ -38,6 +38,24 @@ {display_name} \e[1;31m:)\e[0m + + + + + + 9000 + + + diff --git a/src/Access/AccessBackup.cpp b/src/Access/AccessBackup.cpp index 90effdab70f..d9ee89b45ce 100644 --- a/src/Access/AccessBackup.cpp +++ b/src/Access/AccessBackup.cpp @@ -93,7 +93,7 @@ namespace break; } - UUID id = parse(line); + UUID id = parse(line.substr(0, line.find('\t'))); line.clear(); String queries; diff --git a/src/Access/HTTPAuthClient.h b/src/Access/HTTPAuthClient.h index a8b56cf05a7..a1b97a729a3 100644 --- a/src/Access/HTTPAuthClient.h +++ b/src/Access/HTTPAuthClient.h @@ -82,7 +82,8 @@ public: Result authenticate(const String & user_name, const String & password) const { - Poco::Net::HTTPRequest request{Poco::Net::HTTPRequest::HTTP_GET, this->getURI().getPathAndQuery()}; + Poco::Net::HTTPRequest request{ + Poco::Net::HTTPRequest::HTTP_GET, this->getURI().getPathAndQuery(), Poco::Net::HTTPRequest::HTTP_1_1}; Poco::Net::HTTPBasicCredentials basic_credentials{user_name, password}; basic_credentials.authenticate(request); diff --git a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp index fa90846650d..ecd848f5af3 100644 --- a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp +++ b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp @@ -114,7 +114,7 @@ private: { if (ind < first.size()) return first[ind]; - return second[ind % first.size()]; + return second[ind - first.size()]; } size_t size() const diff --git a/src/Analyzer/ConstantNode.cpp b/src/Analyzer/ConstantNode.cpp index c65090f5b55..3a99ad08ad8 100644 --- a/src/Analyzer/ConstantNode.cpp +++ b/src/Analyzer/ConstantNode.cpp @@ -177,9 +177,10 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const * It could also lead to ambiguous parsing because we don't know if the string literal represents a date or a Decimal64 literal. 
* For this reason, we use a string literal representing a date instead of a Decimal64 literal. */ - if (WhichDataType(constant_value_type->getTypeId()).isDateTime64()) + const auto & constant_value_end_type = removeNullable(constant_value_type); /// if Nullable + if (WhichDataType(constant_value_end_type->getTypeId()).isDateTime64()) { - const auto * date_time_type = typeid_cast(constant_value_type.get()); + const auto * date_time_type = typeid_cast(constant_value_end_type.get()); DecimalField decimal_value; if (constant_value_literal.tryGet>(decimal_value)) { diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp index e98b04fe9a9..8e4e0725a2d 100644 --- a/src/Analyzer/FunctionNode.cpp +++ b/src/Analyzer/FunctionNode.cpp @@ -242,7 +242,8 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const /// Avoid cast for `IN tuple(...)` expression. /// Tuples could be quite big, and adding a type may significantly increase query size. /// It should be safe because set type for `column IN tuple` is deduced from `column` type. - if (isNameOfInFunction(function_name) && argument_nodes.size() > 1 && argument_nodes[1]->getNodeType() == QueryTreeNodeType::CONSTANT) + if (isNameOfInFunction(function_name) && argument_nodes.size() > 1 && argument_nodes[1]->getNodeType() == QueryTreeNodeType::CONSTANT + && !static_cast(argument_nodes[1].get())->hasSourceExpression()) new_options.add_cast_for_constants = false; const auto & parameters = getParameters(); diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index a79433ac130..14d4acc7c9b 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include @@ -452,10 +452,10 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromCompoundExpression( if (auto * column = compound_expression->as()) { const DataTypePtr & column_type = column->getColumn().getTypeInStorage(); - if (column_type->getTypeId() == TypeIndex::Object) + if (column_type->getTypeId() == TypeIndex::ObjectDeprecated) { - const auto * object_type = checkAndGetDataType(column_type.get()); - if (object_type->getSchemaFormat() == "json" && object_type->hasNullableSubcolumns()) + const auto & object_type = checkAndGetDataType(*column_type); + if (object_type.getSchemaFormat() == "json" && object_type.hasNullableSubcolumns()) { QueryTreeNodePtr constant_node_null = std::make_shared(Field()); return constant_node_null; @@ -1000,7 +1000,6 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromJoin(const Identifi if (!join_node_in_resolve_process && from_join_node.isUsingJoinExpression()) { auto & join_using_list = from_join_node.getJoinExpression()->as(); - for (auto & join_using_node : join_using_list.getNodes()) { auto & column_node = join_using_node->as(); diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 2e8acf2f201..004da5ed341 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 8b45c816817..0b93ae6d547 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -490,8 +490,6 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context /// process_list_element_holder is used to make an 
element in ProcessList live while BACKUP is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); - /// Update context to preserve query information in processlist (settings, current_database) - process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, @@ -855,8 +853,6 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); - /// Update context to preserve query information in processlist (settings, current_database) - process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 71d13ad4f53..0649aa5f5d7 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -158,6 +158,8 @@ void ClientApplicationBase::init(int argc, char ** argv) ("config-file,C", po::value(), "config-file path") + ("proto_caps", po::value(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional") + ("query,q", po::value>()->multitoken(), R"(Query. Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple comma-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") ("multiquery,n", "Obsolete, does nothing") @@ -337,6 +339,41 @@ void ClientApplicationBase::init(int argc, char ** argv) if (options.count("server_logs_file")) server_logs_file = options["server_logs_file"].as(); + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + config().setString("proto_caps.send", std::string(cap_str)); + config().setString("proto_caps.recv", std::string(cap_str)); + } + else + config().setString("proto_caps." 
+ direction, std::string(cap_str)); + } + } + query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); query_kind = parseQueryKind(options["query_kind"].as()); profile_events.print = options.count("print-profile-events"); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 8f69eaf54b4..01d03006eec 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -73,9 +73,11 @@ #include #include #include +#include #include #include +#include #include "config.h" #include #include @@ -329,7 +331,11 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, const Setting { output_stream << std::endl; WriteBufferFromOStream res_buf(output_stream, 4096); - formatAST(*res, res_buf); + IAST::FormatSettings format_settings(res_buf, /* one_line */ false); + format_settings.hilite = true; + format_settings.show_secrets = true; + format_settings.print_pretty_type_names = true; + res->format(format_settings); res_buf.finalize(); output_stream << std::endl << std::endl; } @@ -914,6 +920,8 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) } catch (Exception & e) { + if (server_exception) + server_exception->rethrow(); if (!is_interactive) e.addMessage("(in query: {})", full_query); throw; @@ -1032,19 +1040,28 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa query_interrupt_handler.start(signals_before_stop); SCOPE_EXIT({ query_interrupt_handler.stop(); }); - connection->sendQuery( - connection_parameters.timeouts, - query, - query_parameters, - client_context->getCurrentQueryId(), - query_processing_stage, - &client_context->getSettingsRef(), - &client_context->getClientInfo(), - true, - [&](const Progress & progress) { onProgress(progress); }); + try { + connection->sendQuery( + connection_parameters.timeouts, + query, + query_parameters, + client_context->getCurrentQueryId(), + query_processing_stage, + &client_context->getSettingsRef(), + &client_context->getClientInfo(), + true, + [&](const Progress & progress) { onProgress(progress); }); + + if (send_external_tables) + sendExternalTables(parsed_query); + } + catch (const NetException &) + { + // We still want to attempt to process whatever we already received or can receive (socket receive buffer can be not empty) + receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); + throw; + } - if (send_external_tables) - sendExternalTables(parsed_query); receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); break; @@ -2537,6 +2554,7 @@ void ClientBase::runInteractive() *suggest, history_file, getClientConfiguration().has("multiline"), + getClientConfiguration().getBool("ignore_shell_suspend", true), query_extenders, query_delimiters, word_break_characters, diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 07f4bf19f05..e89bd7a2bf5 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -5,8 +5,6 @@ #include #include #include -#include -#include #include #include #include @@ -85,6 +83,7 @@ Connection::~Connection() Connection::Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, [[maybe_unused]] const SSHKey & ssh_private_key_, const String & jwt_, const String & quota_key_, @@ -95,6 +94,7 @@ Connection::Connection(const String & host_, UInt16 port_, Protocol::Secure secure_) : 
host(host_), port(port_), default_database(default_database_)
    , user(user_), password(password_)
+   , proto_send_chunked(proto_send_chunked_), proto_recv_chunked(proto_recv_chunked_)
#if USE_SSH
    , ssh_private_key(ssh_private_key_)
#endif
@@ -211,10 +211,10 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
                , tcp_keep_alive_timeout_in_sec);
        }
-       in = std::make_shared(*socket);
+       in = std::make_shared(*socket);
        in->setAsyncCallback(async_callback);
-       out = std::make_shared(*socket);
+       out = std::make_shared(*socket);
        out->setAsyncCallback(async_callback);
        connected = true;
        setDescription();
@@ -222,9 +222,61 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
        sendHello();
        receiveHello(timeouts.handshake_timeout);
+       if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS)
+       {
+           /// Client side of the chunked protocol negotiation.
+           /// The server advertises its protocol capabilities (separately for the send and receive channels) by sending
+           /// one of four values in its 'Hello' response - chunked, notchunked, chunked_optional, notchunked_optional.
+           /// Non-optional values are strict: the server supports only that mode. Optional values mean the server
+           /// prefers that mode but can also work with the opposite one.
+           /// The client selects the mode it is going to use based on its config or command-line settings,
+           /// and sends either a "chunked" or a "notchunked" request in the addendum section of the handshake.
+           /// The client can detect that the server's capabilities are incompatible with its own settings (for example,
+           /// the server strictly requires the chunked protocol while the client's settings allow only notchunked) - in that case
+           /// the client should abort the connection. If the client nevertheless proceeds with an incompatible request,
+           /// the server sends an appropriate exception and disconnects the client.
+
+           auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction)
+           {
+               bool chunked_srv = chunked_srv_str.starts_with("chunked");
+               bool optional_srv = chunked_srv_str.ends_with("_optional");
+               bool chunked_cl = chunked_cl_str.starts_with("chunked");
+               bool optional_cl = chunked_cl_str.ends_with("_optional");
+
+               if (optional_srv)
+                   return chunked_cl;
+               if (optional_cl)
+                   return chunked_srv;
+               if (chunked_cl != chunked_srv)
+                   throw NetException(
+                       ErrorCodes::NETWORK_ERROR,
+                       "Incompatible protocol: {} set to {}, server requires {}",
+                       direction,
+                       chunked_cl ? "chunked" : "notchunked",
+                       chunked_srv ? "chunked" : "notchunked");
+
+               return chunked_srv;
+           };
+
+           proto_send_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send") ? "chunked" : "notchunked";
+           proto_recv_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv") ?
"chunked" : "notchunked"; + } + else + { + if (proto_send_chunked == "chunked" || proto_recv_chunked == "chunked") + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: server's version is too old and doesn't support chunked protocol while client settings require it."); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) sendAddendum(); + if (proto_send_chunked == "chunked") + out->enableChunked(); + if (proto_recv_chunked == "chunked") + in->enableChunked(); + LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", server_name, server_version_major, server_version_minor, server_version_patch); } @@ -393,6 +445,13 @@ void Connection::sendAddendum() { if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY) writeStringBinary(quota_key, *out); + + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(proto_send_chunked, *out); + writeStringBinary(proto_recv_chunked, *out); + } + out->next(); } @@ -472,6 +531,12 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout) else server_version_patch = server_revision; + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_srv, *in); + readStringBinary(proto_recv_chunked_srv, *in); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { UInt64 rules_size; @@ -611,6 +676,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts) UInt64 pong = 0; writeVarUInt(Protocol::Client::Ping, *out); + out->finishChunk(); out->next(); if (in->eof()) @@ -660,6 +726,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time writeVarUInt(Protocol::Client::TablesStatusRequest, *out); request.write(*out, server_revision); + out->finishChunk(); out->next(); UInt64 response_type = 0; @@ -813,6 +880,8 @@ void Connection::sendQuery( block_profile_events_in.reset(); block_out.reset(); + out->finishChunk(); + /// Send empty block which means end of data. 
if (!with_pending_data) { @@ -829,6 +898,7 @@ void Connection::sendCancel() return; writeVarUInt(Protocol::Client::Cancel, *out); + out->finishChunk(); out->next(); } @@ -854,7 +924,10 @@ void Connection::sendData(const Block & block, const String & name, bool scalar) size_t prev_bytes = out->count(); block_out->write(block); - maybe_compressed_out->next(); + if (maybe_compressed_out != out) + maybe_compressed_out->next(); + if (!block) + out->finishChunk(); out->next(); if (throttler) @@ -865,6 +938,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector & uuids) { writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out); writeVectorBinary(uuids, *out); + out->finishChunk(); out->next(); } @@ -874,6 +948,7 @@ void Connection::sendReadTaskResponse(const String & response) writeVarUInt(Protocol::Client::ReadTaskResponse, *out); writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out); writeStringBinary(response, *out); + out->finishChunk(); out->next(); } @@ -882,6 +957,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp { writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out); response.serialize(*out); + out->finishChunk(); out->next(); } @@ -899,6 +975,8 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String copyData(input, *out); else copyData(input, *out, size); + + out->finishChunk(); out->next(); } @@ -927,6 +1005,8 @@ void Connection::sendScalarsData(Scalars & data) sendData(elem.second, elem.first, true /* scalar */); } + out->finishChunk(); + out_bytes = out->count() - out_bytes; maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes; double elapsed = watch.elapsedSeconds(); @@ -1069,13 +1149,13 @@ std::optional Connection::getResolvedAddress() const bool Connection::poll(size_t timeout_microseconds) { - return static_cast(*in).poll(timeout_microseconds); + return in->poll(timeout_microseconds); } bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || static_cast(*in).hasPendingData(); + return last_input_packet_type.has_value() || in->hasBufferedData(); } @@ -1349,6 +1429,8 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa parameters.default_database, parameters.user, parameters.password, + parameters.proto_send_chunked, + parameters.proto_recv_chunked, parameters.ssh_private_key, parameters.jwt, parameters.quota_key, diff --git a/src/Client/Connection.h b/src/Client/Connection.h index 0f4b3e436df..ed84bc51318 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -8,8 +8,8 @@ #include -#include -#include +#include +#include #include #include @@ -52,6 +52,7 @@ public: Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, const SSHKey & ssh_private_key_, const String & jwt_, const String & quota_key_, @@ -170,6 +171,10 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; + String proto_send_chunked_srv; + String proto_recv_chunked_srv; #if USE_SSH SSHKey ssh_private_key; #endif @@ -209,8 +214,8 @@ private: String server_display_name; std::unique_ptr socket; - std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; std::optional last_input_packet_type; String query_id; diff --git a/src/Client/ConnectionParameters.cpp 
b/src/Client/ConnectionParameters.cpp index 303bebc30d2..4d0a9ffa08c 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -107,6 +107,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati } } + proto_send_chunked = config.getString("proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString("proto_caps.recv", "notchunked"); + quota_key = config.getString("quota_key", ""); /// By default compression is disabled if address looks like localhost. diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index c305c7813f2..382bfe34a3d 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,6 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; + std::string proto_send_chunked = "notchunked"; + std::string proto_recv_chunked = "notchunked"; std::string quota_key; SSHKey ssh_private_key; std::string jwt; diff --git a/src/Client/ConnectionPool.cpp b/src/Client/ConnectionPool.cpp index ed2e7c3c725..ab8ad08826c 100644 --- a/src/Client/ConnectionPool.cpp +++ b/src/Client/ConnectionPool.cpp @@ -13,6 +13,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -22,7 +24,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get( Priority priority) { Key key{ - max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; + max_connections, host, port, default_database, user, password, proto_send_chunked, proto_recv_chunked, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; std::lock_guard lock(mutex); auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{}); @@ -39,6 +41,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( default_database, user, password, + proto_send_chunked, + proto_recv_chunked, quota_key, cluster, cluster_secret, diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index 0fcb3c4e7e1..219548b62a0 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -73,6 +73,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & cluster_, const String & cluster_secret_, @@ -85,6 +87,8 @@ public: , default_database(default_database_) , user(user_) , password(password_) + , proto_send_chunked(proto_send_chunked_) + , proto_recv_chunked(proto_recv_chunked_) , quota_key(quota_key_) , cluster(cluster_) , cluster_secret(cluster_secret_) @@ -116,7 +120,9 @@ protected: { return std::make_shared( host, port, - default_database, user, password, SSHKey(), /*jwt*/ "", quota_key, + default_database, user, password, + proto_send_chunked, proto_recv_chunked, + SSHKey(), /*jwt*/ "", quota_key, cluster, cluster_secret, client_name, compression, secure); } @@ -125,6 +131,8 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; /// For inter-server authorization @@ -150,6 +158,8 @@ public: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; String cluster; String cluster_secret; @@ 
-173,6 +183,8 @@ public: String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -190,6 +202,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP { return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port && lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password + && lhs.proto_send_chunked == rhs.proto_send_chunked && lhs.proto_recv_chunked == rhs.proto_recv_chunked && lhs.quota_key == rhs.quota_key && lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name && lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority; diff --git a/src/Client/ReplxxLineReader.cpp b/src/Client/ReplxxLineReader.cpp index 3b3508d1a58..78ae6c5eb15 100644 --- a/src/Client/ReplxxLineReader.cpp +++ b/src/Client/ReplxxLineReader.cpp @@ -294,6 +294,7 @@ ReplxxLineReader::ReplxxLineReader( Suggest & suggest, const String & history_file_path_, bool multiline_, + bool ignore_shell_suspend, Patterns extenders_, Patterns delimiters_, const char word_break_characters_[], @@ -363,7 +364,8 @@ ReplxxLineReader::ReplxxLineReader( rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); }); /// We don't want the default, "suspend" behavior, it confuses people. - rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character"); + if (ignore_shell_suspend) + rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character"); auto commit_action = [this](char32_t code) { diff --git a/src/Client/ReplxxLineReader.h b/src/Client/ReplxxLineReader.h index c46080420ef..1dbad2c70dd 100644 --- a/src/Client/ReplxxLineReader.h +++ b/src/Client/ReplxxLineReader.h @@ -15,6 +15,7 @@ public: Suggest & suggest, const String & history_file_path, bool multiline, + bool ignore_shell_suspend, Patterns extenders_, Patterns delimiters_, const char word_break_characters_[], diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 26841879474..83d4c24c769 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -452,6 +452,11 @@ void ColumnArray::reserve(size_t n) getData().reserve(n); /// The average size of arrays is not taken into account here. Or it is considered to be no more than 1. 
} +size_t ColumnArray::capacity() const +{ + return getOffsets().capacity(); +} + void ColumnArray::prepareForSquashing(const Columns & source_columns) { size_t new_size = size(); diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index d6f71b72940..f77268a8be6 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -118,6 +118,7 @@ public: void updatePermutationWithCollation(const Collator & collator, PermutationSortDirection direction, PermutationSortStability stability, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 07120f5f035..6f8360a54dd 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -53,6 +53,7 @@ public: size_t allocatedBytes() const override { return data.allocated_bytes(); } void protect() override { data.protect(); } void reserve(size_t n) override { data.reserve_exact(n); } + size_t capacity() const override { return data.capacity(); } void shrinkToFit() override { data.shrink_to_fit(); } #if !defined(DEBUG_OR_SANITIZER_BUILD) diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 69b4c5dfc4e..1f37add9d2d 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -16,7 +16,6 @@ #include #include #include -#include namespace DB { @@ -56,6 +55,7 @@ ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_ ColumnDynamic::ColumnDynamic( MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) : variant_column(std::move(variant_column_)) + , variant_column_ptr(assert_cast(variant_column.get())) , max_dynamic_types(max_dynamic_types_) , global_max_dynamic_types(global_max_dynamic_types_) , statistics(statistics_) @@ -66,6 +66,7 @@ ColumnDynamic::ColumnDynamic( ColumnDynamic::ColumnDynamic( MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) : variant_column(std::move(variant_column_)) + , variant_column_ptr(assert_cast(variant_column.get())) , variant_info(variant_info_) , max_dynamic_types(max_dynamic_types_) , global_max_dynamic_types(global_max_dynamic_types_) @@ -79,6 +80,7 @@ void ColumnDynamic::setVariantType(const DataTypePtr & variant_type) throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific variant type is allowed only for empty dynamic column"); variant_column = variant_type->createColumn(); + variant_column_ptr = assert_cast(variant_column.get()); createVariantInfo(variant_type); } @@ -313,12 +315,12 @@ void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) /// Check if we have the same variants in both columns. 
if (variant_info.variant_name == dynamic_src.variant_info.variant_name) { - variant_column->insertFrom(*dynamic_src.variant_column, n); + variant_column_ptr->insertFrom(*dynamic_src.variant_column, n); return; } - auto & variant_col = assert_cast(*variant_column); - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + auto & variant_col = getVariantColumn(); + const auto & src_variant_col = dynamic_src.getVariantColumn(); auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); auto src_offset = src_variant_col.offsetAt(n); @@ -386,16 +388,15 @@ void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t "[start({}) + length({}) > src.size()({})]", start, length, src_.size()); const auto & dynamic_src = assert_cast(src_); + auto & variant_col = getVariantColumn(); /// Check if we have the same variants in both columns. if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column->insertRangeFrom(*dynamic_src.variant_column, start, length); + variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length); return; } - auto & variant_col = assert_cast(*variant_column); - /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { @@ -602,15 +603,15 @@ void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size #endif { const auto & dynamic_src = assert_cast(src_); + auto & variant_col = getVariantColumn(); /// Check if we have the same variants in both columns. if (variant_info.variant_names == dynamic_src.variant_info.variant_names) { - variant_column->insertManyFrom(*dynamic_src.variant_column, position, length); + variant_col.insertManyFrom(*dynamic_src.variant_column, position, length); return; } - auto & variant_col = assert_cast(*variant_column); const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); auto src_offset = src_variant_col.offsetAt(position); @@ -751,7 +752,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, Arena & arena, const const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) { - auto & variant_col = assert_cast(*variant_column); + auto & variant_col = getVariantColumn(); UInt8 null_bit = unalignedLoad(pos); pos += sizeof(UInt8); if (null_bit) @@ -808,7 +809,7 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const { - const auto & variant_col = assert_cast(*variant_column); + const auto & variant_col = getVariantColumn(); auto discr = variant_col.globalDiscriminatorAt(n); if (discr == ColumnVariant::NULL_DISCRIMINATOR) { @@ -826,9 +827,9 @@ int ColumnDynamic::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_di int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #endif { - const auto & left_variant = assert_cast(*variant_column); + const auto & left_variant = getVariantColumn(); const auto & right_dynamic = assert_cast(rhs); - const auto & right_variant = assert_cast(*right_dynamic.variant_column); + const auto & right_variant = right_dynamic.getVariantColumn(); auto left_discr = left_variant.globalDiscriminatorAt(n); auto left_shared_variant_discr = getSharedVariantDiscriminator(); @@ -970,7 +971,7 @@ void ColumnDynamic::updatePermutation(IColumn::PermutationSortDirection 
directio ColumnPtr ColumnDynamic::compress() const { - ColumnPtr variant_compressed = variant_column->compress(); + ColumnPtr variant_compressed = variant_column_ptr->compress(); size_t byte_size = variant_compressed->byteSize(); return ColumnCompressed::create(size(), byte_size, [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_global_max_dynamic_types = global_max_dynamic_types, my_statistics = statistics]() mutable @@ -998,7 +999,18 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) variant_col.getLocalDiscriminators().reserve_exact(new_size); variant_col.getOffsets().reserve_exact(new_size); - /// Second, collect all variants and their total sizes. + /// Second, preallocate memory for variants. + prepareVariantsForSquashing(source_columns); +} + +void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns) +{ + /// Internal variants of source dynamic columns may differ. + /// We want to preallocate memory for all variants we will have after squashing. + /// It may happen that the total number of variants in source columns will + /// exceed the limit, in this case we will choose the most frequent variants. + + /// Collect all variants and their total sizes. std::unordered_map total_variant_sizes; DataTypes all_variants; @@ -1072,6 +1084,7 @@ void ColumnDynamic::prepareForSquashing(const Columns & source_columns) /// Now current dynamic column has all resulting variants and we can call /// prepareForSquashing on them to preallocate the memory. + auto & variant_col = getVariantColumn(); for (size_t i = 0; i != variant_info.variant_names.size(); ++i) { Columns source_variant_columns; @@ -1240,12 +1253,12 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source void ColumnDynamic::applyNullMap(const ColumnVector::Container & null_map) { - assert_cast(*variant_column).applyNullMap(null_map); + variant_column_ptr->applyNullMap(null_map); } void ColumnDynamic::applyNegatedNullMap(const ColumnVector::Container & null_map) { - assert_cast(*variant_column).applyNegatedNullMap(null_map); + variant_column_ptr->applyNegatedNullMap(null_map); } } diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index e6e720765f6..2ae862de3af 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -106,7 +106,7 @@ public: return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(size_t max_dynamic_types_) + static MutablePtr create(size_t max_dynamic_types_ = MAX_DYNAMIC_TYPES_LIMIT) { return Base::create(max_dynamic_types_); } @@ -136,7 +136,7 @@ public: size_t size() const override { - return variant_column->size(); + return variant_column_ptr->size(); } Field operator[](size_t n) const override; @@ -145,22 +145,22 @@ public: bool isDefaultAt(size_t n) const override { - return variant_column->isDefaultAt(n); + return variant_column_ptr->isDefaultAt(n); } bool isNullAt(size_t n) const override { - return variant_column->isNullAt(n); + return variant_column_ptr->isNullAt(n); } StringRef getDataAt(size_t n) const override { - return variant_column->getDataAt(n); + return variant_column_ptr->getDataAt(n); } void insertData(const char * pos, size_t length) override { - variant_column->insertData(pos, length); + variant_column_ptr->insertData(pos, length); } void insert(const Field & x) override; @@ -178,17 +178,17 @@ public: void 
insertDefault() override { - variant_column->insertDefault(); + variant_column_ptr->insertDefault(); } void insertManyDefaults(size_t length) override { - variant_column->insertManyDefaults(length); + variant_column_ptr->insertManyDefaults(length); } void popBack(size_t n) override { - variant_column->popBack(n); + variant_column_ptr->popBack(n); } StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override; @@ -199,42 +199,42 @@ public: WeakHash32 getWeakHash32() const override { - return variant_column->getWeakHash32(); + return variant_column_ptr->getWeakHash32(); } void updateHashFast(SipHash & hash) const override { - variant_column->updateHashFast(hash); + variant_column_ptr->updateHashFast(hash); } ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override { - return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types, global_max_dynamic_types); + return create(variant_column_ptr->filter(filt, result_size_hint), variant_info, max_dynamic_types, global_max_dynamic_types); } void expand(const Filter & mask, bool inverted) override { - variant_column->expand(mask, inverted); + variant_column_ptr->expand(mask, inverted); } ColumnPtr permute(const Permutation & perm, size_t limit) const override { - return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types, global_max_dynamic_types); + return create(variant_column_ptr->permute(perm, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr index(const IColumn & indexes, size_t limit) const override { - return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types, global_max_dynamic_types); + return create(variant_column_ptr->index(indexes, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr replicate(const Offsets & replicate_offsets) const override { - return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types, global_max_dynamic_types); + return create(variant_column_ptr->replicate(replicate_offsets), variant_info, max_dynamic_types, global_max_dynamic_types); } MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override { - MutableColumns scattered_variant_columns = variant_column->scatter(num_columns, selector); + MutableColumns scattered_variant_columns = variant_column_ptr->scatter(num_columns, selector); MutableColumns scattered_columns; scattered_columns.reserve(num_columns); for (auto & scattered_variant_column : scattered_variant_columns) @@ -251,12 +251,12 @@ public: bool hasEqualValues() const override { - return variant_column->hasEqualValues(); + return variant_column_ptr->hasEqualValues(); } void getExtremes(Field & min, Field & max) const override { - variant_column->getExtremes(min, max); + variant_column_ptr->getExtremes(min, max); } void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, @@ -267,44 +267,53 @@ public: void reserve(size_t n) override { - variant_column->reserve(n); + variant_column_ptr->reserve(n); + } + + size_t capacity() const override + { + return variant_column_ptr->capacity(); } void prepareForSquashing(const Columns & source_columns) override; + /// Prepare only variants but not discriminators and offsets. 
+ void prepareVariantsForSquashing(const Columns & source_columns); void ensureOwnership() override { - variant_column->ensureOwnership(); + variant_column_ptr->ensureOwnership(); } size_t byteSize() const override { - return variant_column->byteSize(); + return variant_column_ptr->byteSize(); } size_t byteSizeAt(size_t n) const override { - return variant_column->byteSizeAt(n); + return variant_column_ptr->byteSizeAt(n); } size_t allocatedBytes() const override { - return variant_column->allocatedBytes(); + return variant_column_ptr->allocatedBytes(); } void protect() override { - variant_column->protect(); + variant_column_ptr->protect(); } void forEachSubcolumn(MutableColumnCallback callback) override { callback(variant_column); + variant_column_ptr = assert_cast(variant_column.get()); } void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override { callback(*variant_column); + variant_column_ptr = assert_cast(variant_column.get()); variant_column->forEachSubcolumnRecursively(callback); } @@ -319,27 +328,27 @@ public: double getRatioOfDefaultRows(double sample_ratio) const override { - return variant_column->getRatioOfDefaultRows(sample_ratio); + return variant_column_ptr->getRatioOfDefaultRows(sample_ratio); } UInt64 getNumberOfDefaultRows() const override { - return variant_column->getNumberOfDefaultRows(); + return variant_column_ptr->getNumberOfDefaultRows(); } void getIndicesOfNonDefaultRows(Offsets & indices, size_t from, size_t limit) const override { - variant_column->getIndicesOfNonDefaultRows(indices, from, limit); + variant_column_ptr->getIndicesOfNonDefaultRows(indices, from, limit); } void finalize() override { - variant_column->finalize(); + variant_column_ptr->finalize(); } bool isFinalized() const override { - return variant_column->isFinalized(); + return variant_column_ptr->isFinalized(); } /// Apply null map to a nested Variant column. @@ -351,8 +360,8 @@ public: const ColumnPtr & getVariantColumnPtr() const { return variant_column; } ColumnPtr & getVariantColumnPtr() { return variant_column; } - const ColumnVariant & getVariantColumn() const { return assert_cast(*variant_column); } - ColumnVariant & getVariantColumn() { return assert_cast(*variant_column); } + const ColumnVariant & getVariantColumn() const { return *variant_column_ptr; } + ColumnVariant & getVariantColumn() { return *variant_column_ptr; } bool addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name); bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } @@ -420,6 +429,7 @@ public: } const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); } + private: void createVariantInfo(const DataTypePtr & variant_type); @@ -432,6 +442,10 @@ private: void updateVariantInfoAndExpandVariantColumn(const DataTypePtr & new_variant_type); WrappedPtr variant_column; + /// Store and use pointer to ColumnVariant to avoid virtual calls. + /// ColumnDynamic is widely used inside ColumnObject for each path and + /// with hundreds of paths these virtual calls are noticeable. + ColumnVariant * variant_column_ptr; /// Store the type of current variant with some additional information. VariantInfo variant_info; /// The maximum number of different types that can be stored in this Dynamic column. 
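The variant_column_ptr member introduced above is a devirtualization cache: ColumnDynamic keeps a raw pointer to the concrete ColumnVariant next to the owning pointer and refreshes it wherever the owning pointer may be replaced (see the forEachSubcolumn / forEachSubcolumnRecursively overrides), so hot-path calls avoid virtual dispatch through IColumn. A minimal, self-contained sketch of the same pattern follows; all names in it are illustrative, not ClickHouse APIs.

#include <cassert>
#include <cstddef>
#include <memory>

struct IColumnLike
{
    virtual ~IColumnLike() = default;
    virtual std::size_t size() const = 0;
};

struct VariantLike : IColumnLike
{
    std::size_t rows = 0;
    std::size_t size() const override { return rows; }
    std::size_t sizeNoVirtual() const { return rows; }  /// concrete, non-virtual accessor
};

class DynamicLike
{
    std::unique_ptr<IColumnLike> variant_column;   /// owning pointer, may be replaced
    VariantLike * variant_column_ptr = nullptr;    /// cached concrete pointer for hot paths

public:
    DynamicLike()
        : variant_column(std::make_unique<VariantLike>())
        , variant_column_ptr(static_cast<VariantLike *>(variant_column.get()))
    {
    }

    /// Analogue of forEachSubcolumn: the callback may swap the owning pointer,
    /// so the cached raw pointer must be re-derived afterwards.
    template <typename F>
    void forEachSubcolumn(F && callback)
    {
        callback(variant_column);
        variant_column_ptr = static_cast<VariantLike *>(variant_column.get());
        assert(variant_column_ptr != nullptr);
    }

    /// Hot path: no virtual dispatch through IColumnLike.
    std::size_t size() const { return variant_column_ptr->sizeNoVirtual(); }
};

The essential invariant is that every place able to replace the owning pointer also refreshes the cache, which is exactly what the two callback overrides in the patch do.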
diff --git a/src/Columns/ColumnFixedString.h b/src/Columns/ColumnFixedString.h index 676ac7712ba..8cf0a6a57da 100644 --- a/src/Columns/ColumnFixedString.h +++ b/src/Columns/ColumnFixedString.h @@ -182,6 +182,11 @@ public: chars.reserve_exact(n * size); } + size_t capacity() const override + { + return chars.capacity() / n; + } + void shrinkToFit() override { chars.shrink_to_fit(); diff --git a/src/Columns/ColumnLowCardinality.h b/src/Columns/ColumnLowCardinality.h index 3766b247d60..3cc1c8919c0 100644 --- a/src/Columns/ColumnLowCardinality.h +++ b/src/Columns/ColumnLowCardinality.h @@ -46,8 +46,8 @@ public: return Base::create(std::move(column_unique), std::move(indexes), is_shared); } - std::string getName() const override { return "ColumnLowCardinality"; } - const char * getFamilyName() const override { return "ColumnLowCardinality"; } + std::string getName() const override { return "LowCardinality(" + getDictionary().getNestedColumn()->getName() + ")"; } + const char * getFamilyName() const override { return "LowCardinality"; } TypeIndex getDataType() const override { return TypeIndex::LowCardinality; } ColumnPtr convertToFullColumn() const { return getDictionary().getNestedColumn()->index(getIndexes(), 0); } @@ -172,6 +172,7 @@ public: } void reserve(size_t n) override { idx.reserve(n); } + size_t capacity() const override { return idx.capacity(); } void shrinkToFit() override { idx.shrinkToFit(); } /// Don't count the dictionary size as it can be shared between different blocks. @@ -309,6 +310,7 @@ public: void popBack(size_t n) { positions->popBack(n); } void reserve(size_t n) { positions->reserve(n); } + size_t capacity() const { return positions->capacity(); } void shrinkToFit() { positions->shrinkToFit(); } UInt64 getMaxPositionForCurrentType() const; diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 651b073fef7..536da4d06d0 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -249,6 +249,11 @@ void ColumnMap::reserve(size_t n) nested->reserve(n); } +size_t ColumnMap::capacity() const +{ + return nested->capacity(); +} + void ColumnMap::prepareForSquashing(const Columns & source_columns) { Columns nested_source_columns; diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 191476839f1..39d15a586b9 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -94,6 +94,7 @@ public: void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 2a25cac6461..ec375ea5a8d 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -706,6 +706,11 @@ void ColumnNullable::reserve(size_t n) getNullMapData().reserve(n); } +size_t ColumnNullable::capacity() const +{ + return getNullMapData().capacity(); +} + void ColumnNullable::prepareForSquashing(const Columns & source_columns) { size_t new_size = size(); diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index 2c32e0fe5a0..78274baca51 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -125,6 +125,7 @@ public: size_t limit, int null_direction_hint, Permutation & res, 
EqualRanges& equal_ranges) const override; size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override; void reserve(size_t n) override; + size_t capacity() const override; void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index eb99bb4081b..b7194ef50e7 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1,766 +1,444 @@ -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - +#include +#include +#include +#include +#include +#include namespace DB { namespace ErrorCodes { - extern const int ARGUMENT_OUT_OF_BOUND; - extern const int DUPLICATE_COLUMN; - extern const int EXPERIMENTAL_FEATURE_ERROR; - extern const int ILLEGAL_COLUMN; - extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; - extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; + extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; } namespace { -/// Recreates column with default scalar values and keeps sizes of arrays. -ColumnPtr recreateColumnWithDefaultValues( - const ColumnPtr & column, const DataTypePtr & scalar_type, size_t num_dimensions) +const FormatSettings & getFormatSettings() { - const auto * column_array = checkAndGetColumn(column.get()); - if (column_array && num_dimensions) - { - return ColumnArray::create( - recreateColumnWithDefaultValues( - column_array->getDataPtr(), scalar_type, num_dimensions - 1), - IColumn::mutate(column_array->getOffsetsPtr())); - } - - return createArrayOfType(scalar_type, num_dimensions)->createColumn()->cloneResized(column->size()); + static const FormatSettings settings; + return settings; } -/// Replaces NULL fields to given field or empty array. -class FieldVisitorReplaceNull : public StaticVisitor +const std::shared_ptr & getDynamicSerialization() { -public: - explicit FieldVisitorReplaceNull( - const Field & replacement_, size_t num_dimensions_) - : replacement(replacement_) - , num_dimensions(num_dimensions_) - { - } - - Field operator()(const Null &) const - { - return num_dimensions ? Array() : replacement; - } - - Field operator()(const Array & x) const - { - assert(num_dimensions > 0); - const size_t size = x.size(); - Array res(size); - for (size_t i = 0; i < size; ++i) - res[i] = applyVisitor(FieldVisitorReplaceNull(replacement, num_dimensions - 1), x[i]); - return res; - } - - template - Field operator()(const T & x) const { return x; } - -private: - const Field & replacement; - size_t num_dimensions; -}; - -/// Visitor that allows to get type of scalar field -/// or least common type of scalars in array. -/// More optimized version of FieldToDataType. 
-class FieldVisitorToScalarType : public StaticVisitor<> -{ -public: - using FieldType = Field::Types::Which; - - void operator()(const Array & x) - { - size_t size = x.size(); - for (size_t i = 0; i < size; ++i) - applyVisitor(*this, x[i]); - } - - void operator()(const UInt64 & x) - { - field_types.insert(FieldType::UInt64); - if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt8); - else if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt16); - else if (x <= std::numeric_limits::max()) - type_indexes.insert(TypeIndex::UInt32); - else - type_indexes.insert(TypeIndex::UInt64); - } - - void operator()(const Int64 & x) - { - field_types.insert(FieldType::Int64); - if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int8); - else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int16); - else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) - type_indexes.insert(TypeIndex::Int32); - else - type_indexes.insert(TypeIndex::Int64); - } - - void operator()(const bool &) - { - field_types.insert(FieldType::UInt64); - type_indexes.insert(TypeIndex::UInt8); - } - - void operator()(const Null &) - { - have_nulls = true; - } - - template - void operator()(const T &) - { - field_types.insert(Field::TypeToEnum>::value); - type_indexes.insert(TypeToTypeIndex>); - } - - DataTypePtr getScalarType() const { return getLeastSupertypeOrString(type_indexes); } - bool haveNulls() const { return have_nulls; } - bool needConvertField() const { return field_types.size() > 1; } - -private: - TypeIndexSet type_indexes; - std::unordered_set field_types; - bool have_nulls = false; -}; + static const std::shared_ptr dynamic_serialization = std::make_shared(); + return dynamic_serialization; +} } -FieldInfo getFieldInfo(const Field & field) +ColumnObject::ColumnObject( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const StatisticsPtr & statistics_) + : shared_data(std::move(shared_data_)) + , max_dynamic_paths(max_dynamic_paths_) + , global_max_dynamic_paths(global_max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) + , statistics(statistics_) { - FieldVisitorToScalarType to_scalar_type_visitor; - applyVisitor(to_scalar_type_visitor, field); - FieldVisitorToNumberOfDimensions to_number_dimension_visitor; + typed_paths.reserve(typed_paths_.size()); + for (auto & [path, column] : typed_paths_) + typed_paths[path] = std::move(column); - return + dynamic_paths.reserve(dynamic_paths_.size()); + dynamic_paths_ptrs.reserve(dynamic_paths_.size()); + for (auto & [path, column] : dynamic_paths_) { - to_scalar_type_visitor.getScalarType(), - to_scalar_type_visitor.haveNulls(), - to_scalar_type_visitor.needConvertField(), - applyVisitor(to_number_dimension_visitor, field), - to_number_dimension_visitor.need_fold_dimension - }; -} - -ColumnObject::Subcolumn::Subcolumn(MutableColumnPtr && data_, bool is_nullable_) - : least_common_type(getDataTypeByColumn(*data_)) - , is_nullable(is_nullable_) - , num_rows(data_->size()) -{ - data.push_back(std::move(data_)); -} - -ColumnObject::Subcolumn::Subcolumn( - size_t size_, bool is_nullable_) - : least_common_type(std::make_shared()) - , is_nullable(is_nullable_) - , num_of_defaults_in_prefix(size_) - , num_rows(size_) -{ -} - -size_t 
ColumnObject::Subcolumn::size() const -{ - return num_rows; -} - -size_t ColumnObject::Subcolumn::byteSize() const -{ - size_t res = 0; - for (const auto & part : data) - res += part->byteSize(); - return res; -} - -size_t ColumnObject::Subcolumn::allocatedBytes() const -{ - size_t res = 0; - for (const auto & part : data) - res += part->allocatedBytes(); - return res; -} - -void ColumnObject::Subcolumn::get(size_t n, Field & res) const -{ - if (isFinalized()) - { - getFinalizedColumn().get(n, res); - return; - } - - size_t ind = n; - if (ind < num_of_defaults_in_prefix) - { - res = least_common_type.get()->getDefault(); - return; - } - - ind -= num_of_defaults_in_prefix; - for (const auto & part : data) - { - if (ind < part->size()) - { - part->get(ind, res); - res = convertFieldToTypeOrThrow(res, *least_common_type.get()); - return; - } - - ind -= part->size(); - } - - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for getting field is out of range", n); -} - -void ColumnObject::Subcolumn::checkTypes() const -{ - DataTypes prefix_types; - prefix_types.reserve(data.size()); - for (size_t i = 0; i < data.size(); ++i) - { - auto current_type = getDataTypeByColumn(*data[i]); - prefix_types.push_back(current_type); - auto prefix_common_type = getLeastSupertype(prefix_types); - if (!prefix_common_type->equals(*current_type)) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Data type {} of column at position {} cannot represent all columns from i-th prefix", - current_type->getName(), i); + dynamic_paths[path] = std::move(column); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); } } -void ColumnObject::Subcolumn::insert(Field field) +ColumnObject::ColumnObject( + std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) + : max_dynamic_paths(max_dynamic_paths_), global_max_dynamic_paths(max_dynamic_paths_), max_dynamic_types(max_dynamic_types_) { - auto info = DB::getFieldInfo(field); - insert(std::move(field), std::move(info)); -} - -void ColumnObject::Subcolumn::addNewColumnPart(DataTypePtr type) -{ - auto serialization = type->getSerialization(ISerialization::Kind::SPARSE); - data.push_back(type->createColumn(*serialization)); - least_common_type = LeastCommonType{std::move(type)}; -} - -static bool isConversionRequiredBetweenIntegers(const IDataType & lhs, const IDataType & rhs) -{ - /// If both of types are signed/unsigned integers and size of left field type - /// is less than right type, we don't need to convert field, - /// because all integer fields are stored in Int64/UInt64. 
- - WhichDataType which_lhs(lhs); - WhichDataType which_rhs(rhs); - - bool is_native_int = which_lhs.isNativeInt() && which_rhs.isNativeInt(); - bool is_native_uint = which_lhs.isNativeUInt() && which_rhs.isNativeUInt(); - - return (!is_native_int && !is_native_uint) - || lhs.getSizeOfValueInMemory() > rhs.getSizeOfValueInMemory(); -} - -void ColumnObject::Subcolumn::insert(Field field, FieldInfo info) -{ - auto base_type = std::move(info.scalar_type); - - if (isNothing(base_type) && info.num_dimensions == 0) + typed_paths.reserve(typed_paths_.size()); + for (auto & [path, column] : typed_paths_) { - insertDefault(); - return; + if (!column->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected non-empty typed path column in ColumnObject constructor"); + typed_paths[path] = std::move(column); } - auto column_dim = least_common_type.getNumberOfDimensions(); - auto value_dim = info.num_dimensions; - - if (isNothing(least_common_type.get())) - column_dim = value_dim; - - if (isNothing(base_type)) - value_dim = column_dim; - - if (value_dim != column_dim) - throw Exception(ErrorCodes::NUMBER_OF_DIMENSIONS_MISMATCHED, - "Dimension of types mismatched between inserted value and column. " - "Dimension of value: {}. Dimension of column: {}", - value_dim, column_dim); - - if (is_nullable) - base_type = makeNullable(base_type); - - if (!is_nullable && info.have_nulls) - field = applyVisitor(FieldVisitorReplaceNull(base_type->getDefault(), value_dim), std::move(field)); - - bool type_changed = false; - const auto & least_common_base_type = least_common_type.getBase(); - - if (data.empty()) - { - addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); - } - else if (!least_common_base_type->equals(*base_type) && !isNothing(base_type)) - { - if (isConversionRequiredBetweenIntegers(*base_type, *least_common_base_type)) - { - base_type = getLeastSupertypeOrString(DataTypes{std::move(base_type), least_common_base_type}); - type_changed = true; - if (!least_common_base_type->equals(*base_type)) - addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); - } - } - - if (type_changed || info.need_convert) - field = convertFieldToTypeOrThrow(field, *least_common_type.get()); - - if (!data.back()->tryInsert(field)) - { - /** Normalization of the field above is pretty complicated (it uses several FieldVisitors), - * so in the case of a bug, we may get mismatched types. - * The `IColumn::insert` method does not check the type of the inserted field, and it can lead to a segmentation fault. - * Therefore, we use the safer `tryInsert` method to get an exception instead of a segmentation fault. 
- */ - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Cannot insert field {} to column {}", - field.dump(), data.back()->dumpStructure()); - } - - ++num_rows; + MutableColumns paths_and_values; + paths_and_values.emplace_back(ColumnString::create()); + paths_and_values.emplace_back(ColumnString::create()); + shared_data = ColumnArray::create(ColumnTuple::create(std::move(paths_and_values))); } -void ColumnObject::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t start, size_t length) +ColumnObject::Ptr ColumnObject::create( + const std::unordered_map & typed_paths_, + const std::unordered_map & dynamic_paths_, + const ColumnPtr & shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const ColumnObject::StatisticsPtr & statistics_) { - assert(start + length <= src.size()); - size_t end = start + length; - num_rows += length; + std::unordered_map mutable_typed_paths; + mutable_typed_paths.reserve(typed_paths_.size()); + for (const auto & [path, column] : typed_paths_) + mutable_typed_paths[path] = typed_paths_.at(path)->assumeMutable(); - if (data.empty()) - { - addNewColumnPart(src.getLeastCommonType()); - } - else if (!least_common_type.get()->equals(*src.getLeastCommonType())) - { - auto new_least_common_type = getLeastSupertypeOrString(DataTypes{least_common_type.get(), src.getLeastCommonType()}); - if (!new_least_common_type->equals(*least_common_type.get())) - addNewColumnPart(std::move(new_least_common_type)); - } + std::unordered_map mutable_dynamic_paths; + mutable_dynamic_paths.reserve(dynamic_paths_.size()); + for (const auto & [path, column] : dynamic_paths_) + mutable_dynamic_paths[path] = dynamic_paths_.at(path)->assumeMutable(); - if (end <= src.num_of_defaults_in_prefix) - { - data.back()->insertManyDefaults(length); - return; - } - - if (start < src.num_of_defaults_in_prefix) - data.back()->insertManyDefaults(src.num_of_defaults_in_prefix - start); - - auto insert_from_part = [&](const auto & column, size_t from, size_t n) - { - assert(from + n <= column->size()); - auto column_type = getDataTypeByColumn(*column); - - if (column_type->equals(*least_common_type.get())) - { - data.back()->insertRangeFrom(*column, from, n); - return; - } - - /// If we need to insert large range, there is no sense to cut part of column and cast it. - /// Casting of all column and inserting from it can be faster. - /// Threshold is just a guess. - - if (n * 3 >= column->size()) - { - auto casted_column = castColumn({column, column_type, ""}, least_common_type.get()); - data.back()->insertRangeFrom(*casted_column, from, n); - return; - } - - auto casted_column = column->cut(from, n); - casted_column = castColumn({casted_column, column_type, ""}, least_common_type.get()); - data.back()->insertRangeFrom(*casted_column, 0, n); - }; - - size_t pos = 0; - size_t processed_rows = src.num_of_defaults_in_prefix; - - /// Find the first part of the column that intersects the range. - while (pos < src.data.size() && processed_rows + src.data[pos]->size() < start) - { - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the first part of column. - if (pos < src.data.size() && processed_rows < start) - { - size_t part_start = start - processed_rows; - size_t part_length = std::min(src.data[pos]->size() - part_start, end - start); - insert_from_part(src.data[pos], part_start, part_length); - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the parts of column in the middle of range. 
- while (pos < src.data.size() && processed_rows + src.data[pos]->size() < end) - { - insert_from_part(src.data[pos], 0, src.data[pos]->size()); - processed_rows += src.data[pos]->size(); - ++pos; - } - - /// Insert from the last part of column if needed. - if (pos < src.data.size() && processed_rows < end) - { - size_t part_end = end - processed_rows; - insert_from_part(src.data[pos], 0, part_end); - } + return ColumnObject::create( + std::move(mutable_typed_paths), + std::move(mutable_dynamic_paths), + shared_data_->assumeMutable(), + max_dynamic_paths_, + global_max_dynamic_paths_, + max_dynamic_types_, + statistics_); } -bool ColumnObject::Subcolumn::isFinalized() const +ColumnObject::MutablePtr ColumnObject::create( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const ColumnObject::StatisticsPtr & statistics_) { - return num_of_defaults_in_prefix == 0 && - (data.empty() || (data.size() == 1 && !data[0]->isSparse())); + return Base::create(std::move(typed_paths_), std::move(dynamic_paths_), std::move(shared_data_), max_dynamic_paths_, global_max_dynamic_paths_, max_dynamic_types_, statistics_); } -void ColumnObject::Subcolumn::finalize() +ColumnObject::MutablePtr ColumnObject::create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_) { - if (isFinalized()) - return; - - if (data.size() == 1 && num_of_defaults_in_prefix == 0) - { - data[0] = data[0]->convertToFullColumnIfSparse(); - return; - } - - const auto & to_type = least_common_type.get(); - auto result_column = to_type->createColumn(); - - if (num_of_defaults_in_prefix) - result_column->insertManyDefaults(num_of_defaults_in_prefix); - - for (auto & part : data) - { - part = part->convertToFullColumnIfSparse(); - auto from_type = getDataTypeByColumn(*part); - size_t part_size = part->size(); - - if (!from_type->equals(*to_type)) - { - auto offsets = ColumnUInt64::create(); - auto & offsets_data = offsets->getData(); - - /// We need to convert only non-default values and then recreate column - /// with default value of new type, because default values (which represents misses in data) - /// may be inconsistent between types (e.g "0" in UInt64 and empty string in String). 
- - part->getIndicesOfNonDefaultRows(offsets_data, 0, part_size); - - if (offsets->size() == part_size) - { - part = castColumn({part, from_type, ""}, to_type); - } - else - { - auto values = part->index(*offsets, offsets->size()); - values = castColumn({values, from_type, ""}, to_type); - part = values->createWithOffsets(offsets_data, *createColumnConstWithDefaultValue(result_column->getPtr()), part_size, /*shift=*/ 0); - } - } - - result_column->insertRangeFrom(*part, 0, part_size); - } - - data = { std::move(result_column) }; - num_of_defaults_in_prefix = 0; + return Base::create(std::move(typed_paths_), max_dynamic_paths_, max_dynamic_types_); } -void ColumnObject::Subcolumn::insertDefault() +std::string ColumnObject::getName() const { - if (data.empty()) - ++num_of_defaults_in_prefix; - else - data.back()->insertDefault(); - - ++num_rows; + WriteBufferFromOwnString ss; + ss << "Object("; + ss << "max_dynamic_paths=" << max_dynamic_paths; + ss << ", max_dynamic_types=" << max_dynamic_types; + std::vector sorted_typed_paths; + sorted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + sorted_typed_paths.push_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + for (const auto & path : sorted_typed_paths) + ss << ", " << path << " " << typed_paths.at(path)->getName(); + ss << ")"; + return ss.str(); } -void ColumnObject::Subcolumn::insertManyDefaults(size_t length) +MutableColumnPtr ColumnObject::cloneEmpty() const { - if (data.empty()) - num_of_defaults_in_prefix += length; - else - data.back()->insertManyDefaults(length); + std::unordered_map empty_typed_paths; + empty_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + empty_typed_paths[path] = column->cloneEmpty(); - num_rows += length; + std::unordered_map empty_dynamic_paths; + empty_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + empty_dynamic_paths[path] = column->cloneEmpty(); + + return ColumnObject::create( + std::move(empty_typed_paths), + std::move(empty_dynamic_paths), + shared_data->cloneEmpty(), + max_dynamic_paths, + global_max_dynamic_paths, + max_dynamic_types, + statistics); } -void ColumnObject::Subcolumn::popBack(size_t n) +MutableColumnPtr ColumnObject::cloneResized(size_t size) const { - assert(n <= size()); + std::unordered_map resized_typed_paths; + resized_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + resized_typed_paths[path] = column->cloneResized(size); - num_rows -= n; - size_t num_removed = 0; - for (auto it = data.rbegin(); it != data.rend(); ++it) - { - if (n == 0) - break; + std::unordered_map resized_dynamic_paths; + resized_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, column] : dynamic_paths) + resized_dynamic_paths[path] = column->cloneResized(size); - auto & column = *it; - if (n < column->size()) - { - column->popBack(n); - n = 0; - } - else - { - ++num_removed; - n -= column->size(); - } - } - - data.resize(data.size() - num_removed); - num_of_defaults_in_prefix -= n; -} - -ColumnObject::Subcolumn ColumnObject::Subcolumn::cut(size_t start, size_t length) const -{ - Subcolumn new_subcolumn(0, is_nullable); - new_subcolumn.insertRangeFrom(*this, start, length); - return new_subcolumn; -} - -Field ColumnObject::Subcolumn::getLastField() const -{ - if (data.empty()) - return Field(); - - const auto & last_part = data.back(); - assert(!last_part->empty()); - return 
(*last_part)[last_part->size() - 1]; -} - -FieldInfo ColumnObject::Subcolumn::getFieldInfo() const -{ - const auto & base_type = least_common_type.getBase(); - return FieldInfo - { - .scalar_type = base_type, - .have_nulls = base_type->isNullable(), - .need_convert = false, - .num_dimensions = least_common_type.getNumberOfDimensions(), - .need_fold_dimension = false, - }; -} - -ColumnObject::Subcolumn ColumnObject::Subcolumn::recreateWithDefaultValues(const FieldInfo & field_info) const -{ - auto scalar_type = field_info.scalar_type; - if (is_nullable) - scalar_type = makeNullable(scalar_type); - - Subcolumn new_subcolumn(*this); - new_subcolumn.least_common_type = LeastCommonType{createArrayOfType(scalar_type, field_info.num_dimensions)}; - - for (auto & part : new_subcolumn.data) - part = recreateColumnWithDefaultValues(part, scalar_type, field_info.num_dimensions); - - return new_subcolumn; -} - -IColumn & ColumnObject::Subcolumn::getFinalizedColumn() -{ - assert(isFinalized()); - return *data[0]; -} - -const IColumn & ColumnObject::Subcolumn::getFinalizedColumn() const -{ - assert(isFinalized()); - return *data[0]; -} - -const ColumnPtr & ColumnObject::Subcolumn::getFinalizedColumnPtr() const -{ - assert(isFinalized()); - return data[0]; -} - -ColumnObject::Subcolumn::LeastCommonType::LeastCommonType() - : type(std::make_shared()) - , base_type(type) - , num_dimensions(0) -{ -} - -ColumnObject::Subcolumn::LeastCommonType::LeastCommonType(DataTypePtr type_) - : type(std::move(type_)) - , base_type(getBaseTypeOfArray(type)) - , num_dimensions(DB::getNumberOfDimensions(*type)) -{ -} - -ColumnObject::ColumnObject(bool is_nullable_) - : is_nullable(is_nullable_) - , num_rows(0) -{ -} - -ColumnObject::ColumnObject(Subcolumns && subcolumns_, bool is_nullable_) - : is_nullable(is_nullable_) - , subcolumns(std::move(subcolumns_)) - , num_rows(subcolumns.empty() ? 0 : (*subcolumns.begin())->data.size()) - -{ - checkConsistency(); -} - -void ColumnObject::checkConsistency() const -{ - if (subcolumns.empty()) - return; - - for (const auto & leaf : subcolumns) - { - if (num_rows != leaf->data.size()) - { - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Sizes of subcolumns are inconsistent in ColumnObject." 
- " Subcolumn '{}' has {} rows, but expected size is {}", - leaf->path.getPath(), leaf->data.size(), num_rows); - } - } -} - -size_t ColumnObject::size() const -{ -#ifndef NDEBUG - checkConsistency(); -#endif - return num_rows; -} - -size_t ColumnObject::byteSize() const -{ - size_t res = 0; - for (const auto & entry : subcolumns) - res += entry->data.byteSize(); - return res; -} - -size_t ColumnObject::allocatedBytes() const -{ - size_t res = 0; - for (const auto & entry : subcolumns) - res += entry->data.allocatedBytes(); - return res; -} - -void ColumnObject::forEachSubcolumn(MutableColumnCallback callback) -{ - for (auto & entry : subcolumns) - for (auto & part : entry->data.data) - callback(part); -} - -void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) -{ - for (auto & entry : subcolumns) - { - for (auto & part : entry->data.data) - { - callback(*part); - part->forEachSubcolumnRecursively(callback); - } - } -} - -void ColumnObject::insert(const Field & field) -{ - const auto & object = field.safeGet(); - - HashSet inserted_paths; - size_t old_size = size(); - for (const auto & [key_str, value] : object) - { - PathInData key(key_str); - inserted_paths.insert(key_str); - if (!hasSubcolumn(key)) - addSubcolumn(key, old_size); - - auto & subcolumn = getSubcolumn(key); - subcolumn.insert(value); - } - - for (auto & entry : subcolumns) - { - if (!inserted_paths.has(entry->path.getPath())) - { - bool inserted = tryInsertDefaultFromNested(entry); - if (!inserted) - entry->data.insertDefault(); - } - } - - ++num_rows; -} - -bool ColumnObject::tryInsert(const Field & field) -{ - if (field.getType() != Field::Types::Which::Object) - return false; - - insert(field); - return true; -} - -void ColumnObject::insertDefault() -{ - for (auto & entry : subcolumns) - entry->data.insertDefault(); - - ++num_rows; + return ColumnObject::create( + std::move(resized_typed_paths), + std::move(resized_dynamic_paths), + shared_data->cloneResized(size), + max_dynamic_paths, + global_max_dynamic_paths, + max_dynamic_types, + statistics); } Field ColumnObject::operator[](size_t n) const { - Field object; - get(n, object); + Object object; + + for (const auto & [path, column] : typed_paths) + object[path] = (*column)[n]; + for (const auto & [path, column] : dynamic_paths_ptrs) + { + /// Output only non-null values from dynamic paths. We cannot distinguish cases when + /// dynamic path has Null value and when it's absent in the row and consider them equivalent. 
+ if (!column->isNullAt(n)) + object[path] = (*column)[n]; + } + + const auto & shared_data_offsets = getSharedDataOffsets(); + const auto [shared_paths, shared_values] = getSharedDataPathsAndValues(); + size_t start = shared_data_offsets[static_cast(n) - 1]; + size_t end = shared_data_offsets[n]; + for (size_t i = start; i != end; ++i) + { + String path = shared_paths->getDataAt(i).toString(); + auto value_data = shared_values->getDataAt(i); + ReadBufferFromMemory buf(value_data.data, value_data.size); + Field value; + getDynamicSerialization()->deserializeBinary(value, buf, getFormatSettings()); + object[path] = value; + } + return object; } void ColumnObject::get(size_t n, Field & res) const { - assert(n < size()); - res = Object(); - auto & object = res.safeGet(); + res = (*this)[n]; +} - for (const auto & entry : subcolumns) +bool ColumnObject::isDefaultAt(size_t n) const +{ + for (const auto & [path, column] : typed_paths) { - auto it = object.try_emplace(entry->path.getPath()).first; - entry->data.get(n, it->second); + if (!column->isDefaultAt(n)) + return false; } + + for (const auto & [path, column] : dynamic_paths_ptrs) + { + if (!column->isDefaultAt(n)) + return false; + } + + if (!shared_data->isDefaultAt(n)) + return false; + + return true; +} + +StringRef ColumnObject::getDataAt(size_t) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDataAt is not supported for {}", getName()); +} + +void ColumnObject::insertData(const char *, size_t) +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertData is not supported for {}", getName()); +} + +ColumnDynamic * ColumnObject::tryToAddNewDynamicPath(std::string_view path) +{ + if (dynamic_paths.size() == max_dynamic_paths) + return nullptr; + + auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types); + new_dynamic_column->reserve(shared_data->capacity()); + new_dynamic_column->insertManyDefaults(size()); + auto it = dynamic_paths.emplace(path, std::move(new_dynamic_column)).first; + auto it_ptr = dynamic_paths_ptrs.emplace(path, assert_cast(it->second.get())).first; + return it_ptr->second; +} + +void ColumnObject::addNewDynamicPath(std::string_view path) +{ + if (!tryToAddNewDynamicPath(path)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add new dynamic path as the limit ({}) on dynamic paths is reached", max_dynamic_paths); +} + +void ColumnObject::setMaxDynamicPaths(size_t max_dynamic_paths_) +{ + if (!empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific max_dynamic_paths parameter is allowed only for empty object column"); + + max_dynamic_paths = max_dynamic_paths_; +} + +void ColumnObject::setDynamicPaths(const std::vector & paths) +{ + if (paths.size() > max_dynamic_paths) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot set dynamic paths to Object column, the number of paths ({}) exceeds the limit ({})", paths.size(), max_dynamic_paths); + + size_t size = this->size(); + for (const auto & path : paths) + { + auto new_dynamic_column = ColumnDynamic::create(max_dynamic_types); + if (size) + new_dynamic_column->insertManyDefaults(size); + dynamic_paths[path] = std::move(new_dynamic_column); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); + } +} + +void ColumnObject::insert(const Field & x) +{ + const auto & object = x.safeGet(); + auto & shared_data_offsets = getSharedDataOffsets(); + auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + size_t current_size = size(); + for (const auto & [path, value_field] : object) + 
{ + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + typed_it->second->insert(value_field); + } + else if (auto dynamic_it = dynamic_paths_ptrs.find(path); dynamic_it != dynamic_paths_ptrs.end()) + { + dynamic_it->second->insert(value_field); + } + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + { + dynamic_path_column->insert(value_field); + } + /// We reached the limit on dynamic paths. Add this path to the common data if the value is not Null. + /// (we cannot distinguish cases when path has Null value or is absent in the row and consider them equivalent). + /// Object is actually std::map, so all paths are already sorted and we can add it right now. + else if (!value_field.isNull()) + { + shared_data_paths->insertData(path.data(), path.size()); + auto & shared_data_values_chars = shared_data_values->getChars(); + WriteBufferFromVector value_buf(shared_data_values_chars, AppendModeTag()); + getDynamicSerialization()->serializeBinary(value_field, value_buf, getFormatSettings()); + value_buf.finalize(); + shared_data_values_chars.push_back(0); + shared_data_values->getOffsets().push_back(shared_data_values_chars.size()); + } + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Fill all remaining typed and dynamic paths with default values. + for (auto & [_, column] : typed_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == current_size) + column->insertDefault(); + } +} + +bool ColumnObject::tryInsert(const Field & x) +{ + if (x.getType() != Field::Types::Which::Object) + return false; + + const auto & object = x.safeGet(); + auto & shared_data_offsets = getSharedDataOffsets(); + auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + size_t prev_size = size(); + size_t prev_paths_size = shared_data_paths->size(); + size_t prev_values_size = shared_data_values->size(); + /// Save all newly added dynamic paths. In case of failure + /// we should remove them. + std::unordered_set new_dynamic_paths; + auto restore_sizes = [&]() + { + for (auto & [_, column] : typed_paths) + { + if (column->size() != prev_size) + column->popBack(column->size() - prev_size); + } + + /// Remove all newly added dynamic paths. + for (const auto & path : new_dynamic_paths) + { + dynamic_paths_ptrs.erase(path); + dynamic_paths.erase(path); + } + + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() != prev_size) + column->popBack(column->size() - prev_size); + } + + if (shared_data_paths->size() != prev_paths_size) + shared_data_paths->popBack(shared_data_paths->size() - prev_paths_size); + if (shared_data_values->size() != prev_values_size) + shared_data_values->popBack(shared_data_values->size() - prev_values_size); + }; + + for (const auto & [path, value_field] : object) + { + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + if (!typed_it->second->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + else if (auto dynamic_it = dynamic_paths_ptrs.find(path); dynamic_it != dynamic_paths_ptrs.end()) + { + if (!dynamic_it->second->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + { + if (!dynamic_path_column->tryInsert(value_field)) + { + restore_sizes(); + return false; + } + } + /// We reached the limit on dynamic paths. 
Add this path to the common data if the value is not Null. + /// (we cannot distinguish cases when path has Null value or is absent in the row and consider them equivalent). + /// Object is actually std::map, so all paths are already sorted and we can add it right now. + else if (!value_field.isNull()) + { + WriteBufferFromOwnString value_buf; + getDynamicSerialization()->serializeBinary(value_field, value_buf, getFormatSettings()); + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value_buf.str().data(), value_buf.str().size()); + } + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Fill all remaining typed and dynamic paths with default values. + for (auto & [_, column] : typed_paths) + { + if (column->size() == prev_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == prev_size) + column->insertDefault(); + } + + return true; } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -769,7 +447,31 @@ void ColumnObject::insertFrom(const IColumn & src, size_t n) void ColumnObject::doInsertFrom(const IColumn & src, size_t n) #endif { - insert(src[n]); + const auto & src_object_column = assert_cast(src); + + /// First, insert typed paths, they must be the same for both columns. + for (const auto & [path, column] : src_object_column.typed_paths) + typed_paths[path]->insertFrom(*column, n); + + /// Second, insert dynamic paths and extend them if needed. + /// We can reach the limit of dynamic paths, and in this case + /// the rest of dynamic paths will be inserted into shared data. + std::vector src_dynamic_paths_for_shared_data; + for (const auto & [path, column] : src_object_column.dynamic_paths) + { + /// Check if we already have such dynamic path. + if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) + it->second->insertFrom(*column, n); + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + dynamic_path_column->insertFrom(*column, n); + /// Limit on dynamic paths is reached, add path to shared data later. + else + src_dynamic_paths_for_shared_data.push_back(path); + } + + /// Finally, insert paths from shared data. + insertFromSharedDataAndFillRemainingDynamicPaths(src_object_column, std::move(src_dynamic_paths_for_shared_data), n, 1); } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -778,101 +480,659 @@ void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t len void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) #endif { - const auto & src_object = assert_cast(src); + /// TODO: try to parallelize doInsertRangeFrom over typed/dynamic paths if it makes sense. + const auto & src_object_column = assert_cast(src); - for (const auto & entry : src_object.subcolumns) + /// First, insert typed paths, they must be the same for both columns. + for (const auto & [path, column] : src_object_column.typed_paths) + typed_paths[path]->insertRangeFrom(*column, start, length); + + /// Second, insert dynamic paths and extend them if needed. + /// We can reach the limit of dynamic paths, and in this case + /// the rest of dynamic paths will be inserted into shared data. + std::vector src_dynamic_paths_for_shared_data; + for (const auto & [path, column] : src_object_column.dynamic_paths) { - if (!hasSubcolumn(entry->path)) + /// Check if we already have such dynamic path. 
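Aside: the insert paths shown above (insert, tryInsert, doInsertFrom) all share one fall-through order: a typed path wins, then an already-materialized dynamic path, then a freshly added dynamic path while the limit allows, and only then the shared data, where Null values are dropped. A minimal standalone sketch of that tiering, using only the standard library and toy stand-in types (strings for values, optional for Null) rather than the real column classes:

#include <map>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>

/// Hypothetical, heavily simplified model of the three storage tiers.
struct ToyObjectColumn
{
    size_t max_dynamic_paths = 2;
    std::unordered_map<std::string, std::vector<std::optional<std::string>>> typed_paths;
    std::unordered_map<std::string, std::vector<std::optional<std::string>>> dynamic_paths;
    /// One sorted (path, value) map per row, standing in for the shared data column.
    std::vector<std::map<std::string, std::string>> shared_data;

    void insertRow(const std::map<std::string, std::optional<std::string>> & row)
    {
        size_t prev_size = shared_data.size();
        shared_data.emplace_back();
        for (const auto & [path, value] : row)
        {
            if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end())
            {
                typed_it->second.push_back(value);        /// 1. typed path
            }
            else if (auto dyn_it = dynamic_paths.find(path); dyn_it != dynamic_paths.end())
            {
                dyn_it->second.push_back(value);          /// 2. existing dynamic path
            }
            else if (dynamic_paths.size() < max_dynamic_paths)
            {
                auto & column = dynamic_paths[path];      /// 3. new dynamic path, backfilled
                column.assign(prev_size, std::nullopt);   ///    with defaults for older rows
                column.push_back(value);
            }
            else if (value.has_value())
            {
                shared_data.back().emplace(path, *value); /// 4. overflow to shared data, Nulls dropped
            }
        }
        /// Pad every path that did not occur in this row with a default value.
        for (auto & [_, column] : typed_paths)
            if (column.size() == prev_size) column.push_back(std::nullopt);
        for (auto & [_, column] : dynamic_paths)
            if (column.size() == prev_size) column.push_back(std::nullopt);
    }
};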
+ if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) + it->second->insertRangeFrom(*column, start, length); + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + dynamic_path_column->insertRangeFrom(*column, start, length); + /// Limit on dynamic paths is reached, add path to shared data later. + else + src_dynamic_paths_for_shared_data.push_back(path); + } + + /// Finally, insert paths from shared data. + insertFromSharedDataAndFillRemainingDynamicPaths(src_object_column, std::move(src_dynamic_paths_for_shared_data), start, length); +} + +void ColumnObject::insertFromSharedDataAndFillRemainingDynamicPaths(const DB::ColumnObject & src_object_column, std::vector && src_dynamic_paths_for_shared_data, size_t start, size_t length) +{ + /// Paths in shared data are sorted, so paths from src_dynamic_paths_for_shared_data should be inserted properly + /// to keep paths sorted. Let's sort them in advance. + std::sort(src_dynamic_paths_for_shared_data.begin(), src_dynamic_paths_for_shared_data.end()); + + /// Check if src object doesn't have any paths in shared data in specified range. + const auto & src_shared_data_offsets = src_object_column.getSharedDataOffsets(); + if (src_shared_data_offsets[static_cast(start) - 1] == src_shared_data_offsets[static_cast(start) + length - 1]) + { + size_t current_size = size(); + + /// If no src dynamic columns should be inserted into shared data, insert defaults. + if (src_dynamic_paths_for_shared_data.empty()) { - if (entry->path.hasNested()) - addNestedSubcolumn(entry->path, entry->data.getFieldInfo(), num_rows); + shared_data->insertManyDefaults(length); + } + /// Otherwise insert required src dynamic columns into shared data. + else + { + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + auto & shared_data_offsets = getSharedDataOffsets(); + for (size_t i = start; i != start + length; ++i) + { + /// Paths in src_dynamic_paths_for_shared_data are already sorted. + for (const auto path : src_dynamic_paths_for_shared_data) + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, path, *src_object_column.dynamic_paths.find(path)->second, i); + shared_data_offsets.push_back(shared_data_paths->size()); + } + } + + /// Insert default values in all remaining dynamic paths. + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == current_size) + column->insertManyDefaults(length); + } + return; + } + + /// Src object column contains some paths in shared data in specified range. + /// Iterate over this range and insert all required paths into shared data or dynamic paths. + const auto [src_shared_data_paths, src_shared_data_values] = src_object_column.getSharedDataPathsAndValues(); + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + auto & shared_data_offsets = getSharedDataOffsets(); + for (size_t row = start; row != start + length; ++row) + { + size_t current_size = shared_data_offsets.size(); + /// Use separate index to iterate over sorted src_dynamic_paths_for_shared_data. + size_t src_dynamic_paths_for_shared_data_index = 0; + size_t offset = src_shared_data_offsets[static_cast(row) - 1]; + size_t end = src_shared_data_offsets[row]; + for (size_t i = offset; i != end; ++i) + { + auto path = src_shared_data_paths->getDataAt(i).toView(); + /// Check if we have this path in dynamic paths. 
+ if (auto it = dynamic_paths_ptrs.find(path); it != dynamic_paths_ptrs.end()) + { + /// Deserialize binary value into dynamic column from shared data. + deserializeValueFromSharedData(src_shared_data_values, i, *it->second); + } else - addSubcolumn(entry->path, num_rows); + { + /// Before inserting this path into shared data check if we need to + /// insert dynamic paths from src_dynamic_paths_for_shared_data before. + while (src_dynamic_paths_for_shared_data_index < src_dynamic_paths_for_shared_data.size() + && src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index] < path) + { + const auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.find(dynamic_path)->second, row); + ++src_dynamic_paths_for_shared_data_index; + } + + /// Insert path and value from src shared data to our shared data. + shared_data_paths->insertFrom(*src_shared_data_paths, i); + shared_data_values->insertFrom(*src_shared_data_values, i); + } } - auto & subcolumn = getSubcolumn(entry->path); - subcolumn.insertRangeFrom(entry->data, start, length); - } - - for (auto & entry : subcolumns) - { - if (!src_object.hasSubcolumn(entry->path)) + /// Insert remaining dynamic paths from src_dynamic_paths_for_shared_data. + for (; src_dynamic_paths_for_shared_data_index != src_dynamic_paths_for_shared_data.size(); ++src_dynamic_paths_for_shared_data_index) { - bool inserted = tryInsertManyDefaultsFromNested(entry); - if (!inserted) - entry->data.insertManyDefaults(length); + const auto dynamic_path = src_dynamic_paths_for_shared_data[src_dynamic_paths_for_shared_data_index]; + serializePathAndValueIntoSharedData(shared_data_paths, shared_data_values, dynamic_path, *src_object_column.dynamic_paths.find(dynamic_path)->second, row); + } + + shared_data_offsets.push_back(shared_data_paths->size()); + + /// Insert default value in all remaining dynamic paths. + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == current_size) + column->insertDefault(); } } - - num_rows += length; - finalize(); } -void ColumnObject::popBack(size_t length) +void ColumnObject::serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n) { - for (auto & entry : subcolumns) - entry->data.popBack(length); + /// Don't store Null values in shared data. We consider Null value equivalent to the absence + /// of this path in the row because we cannot distinguish these 2 cases for dynamic paths. 
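Aside: the range insert above works because both per-row inputs are already ordered: the source row's shared-data entries are sorted by path, and the overflow dynamic paths are sorted once up front, so a two-pointer merge keeps each destination row sorted. A small standalone illustration of that merge with plain std types (not the actual ColumnString pair):

#include <string>
#include <utility>
#include <vector>

using PathValue = std::pair<std::string, std::string>;

/// Merge one row's shared-data entries (sorted by path) with the overflow
/// dynamic paths for that row (also sorted by path), keeping the result sorted.
std::vector<PathValue> mergeRow(const std::vector<PathValue> & shared_entries,
                                const std::vector<PathValue> & overflow_dynamic)
{
    std::vector<PathValue> result;
    result.reserve(shared_entries.size() + overflow_dynamic.size());
    size_t overflow_index = 0;
    for (const auto & entry : shared_entries)
    {
        /// Flush overflow paths that sort before the next shared-data path.
        while (overflow_index < overflow_dynamic.size()
               && overflow_dynamic[overflow_index].first < entry.first)
            result.push_back(overflow_dynamic[overflow_index++]);
        result.push_back(entry);
    }
    /// Append whatever overflow paths remain after the last shared-data entry.
    for (; overflow_index < overflow_dynamic.size(); ++overflow_index)
        result.push_back(overflow_dynamic[overflow_index]);
    return result;
}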
+ if (column.isNullAt(n)) + return; - num_rows -= length; + shared_data_paths->insertData(path.data(), path.size()); + auto & shared_data_values_chars = shared_data_values->getChars(); + WriteBufferFromVector value_buf(shared_data_values_chars, AppendModeTag()); + getDynamicSerialization()->serializeBinary(column, n, value_buf, getFormatSettings()); + value_buf.finalize(); + shared_data_values_chars.push_back(0); + shared_data_values->getOffsets().push_back(shared_data_values_chars.size()); } -template -MutableColumnPtr ColumnObject::applyForSubcolumns(Func && func) const +void ColumnObject::deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const { - if (!isFinalized()) + auto value_data = shared_data_values->getDataAt(n); + ReadBufferFromMemory buf(value_data.data, value_data.size); + getDynamicSerialization()->deserializeBinary(column, buf, getFormatSettings()); +} + +void ColumnObject::insertDefault() +{ + for (auto & [_, column] : typed_paths) + column->insertDefault(); + for (auto & [_, column] : dynamic_paths_ptrs) + column->insertDefault(); + shared_data->insertDefault(); +} + +void ColumnObject::insertManyDefaults(size_t length) +{ + for (auto & [_, column] : typed_paths) + column->insertManyDefaults(length); + for (auto & [_, column] : dynamic_paths_ptrs) + column->insertManyDefaults(length); + shared_data->insertManyDefaults(length); +} + +void ColumnObject::popBack(size_t n) +{ + for (auto & [_, column] : typed_paths) + column->popBack(n); + for (auto & [_, column] : dynamic_paths_ptrs) + column->popBack(n); + shared_data->popBack(n); +} + +StringRef ColumnObject::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const +{ + StringRef res(begin, 0); + // Serialize all paths and values in binary format. + const auto & shared_data_offsets = getSharedDataOffsets(); + size_t offset = shared_data_offsets[static_cast(n) - 1]; + size_t end = shared_data_offsets[static_cast(n)]; + size_t num_paths = typed_paths.size() + dynamic_paths.size() + (end - offset); + char * pos = arena.allocContinue(sizeof(size_t), begin); + memcpy(pos, &num_paths, sizeof(size_t)); + res.data = pos - res.size; + res.size += sizeof(size_t); + /// Serialize paths and values from typed paths. + for (const auto & [path, column] : typed_paths) { - auto finalized = cloneFinalized(); - auto & finalized_object = assert_cast(*finalized); - return finalized_object.applyForSubcolumns(std::forward(func)); + size_t path_size = path.size(); + pos = arena.allocContinue(sizeof(size_t) + path_size, begin); + memcpy(pos, &path_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t), path.data(), path_size); + auto data_ref = column->serializeValueIntoArena(n, arena, begin); + res.data = data_ref.data - res.size - sizeof(size_t) - path_size; + res.size += data_ref.size + sizeof(size_t) + path_size; } - auto res = ColumnObject::create(is_nullable); - for (const auto & subcolumn : subcolumns) + /// Serialize paths and values from dynamic paths. + for (const auto & [path, column] : dynamic_paths) { - auto new_subcolumn = func(subcolumn->data.getFinalizedColumn()); - res->addSubcolumn(subcolumn->path, new_subcolumn->assumeMutable()); + WriteBufferFromOwnString buf; + getDynamicSerialization()->serializeBinary(*column, n, buf, getFormatSettings()); + serializePathAndValueIntoArena(arena, begin, path, buf.str(), res); } + /// Serialize paths and values from shared data. 
+ auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + for (size_t i = offset; i != end; ++i) + serializePathAndValueIntoArena(arena, begin, shared_data_paths->getDataAt(i), shared_data_values->getDataAt(i), res); + return res; } -ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const +void ColumnObject::serializePathAndValueIntoArena(DB::Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.permute(perm, limit); }); + size_t value_size = value.size; + size_t path_size = path.size; + char * pos = arena.allocContinue(sizeof(size_t) + path_size + sizeof(size_t) + value_size, begin); + memcpy(pos, &path_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t), path.data, path_size); + memcpy(pos + sizeof(size_t) + path_size, &value_size, sizeof(size_t)); + memcpy(pos + sizeof(size_t) + path_size + sizeof(size_t), value.data, value_size); + res.data = pos - res.size; + res.size += sizeof(size_t) + path_size + sizeof(size_t) + value_size; } -ColumnPtr ColumnObject::filter(const Filter & filter, ssize_t result_size_hint) const +const char * ColumnObject::deserializeAndInsertFromArena(const char * pos) { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.filter(filter, result_size_hint); }); + size_t current_size = size(); + /// Deserialize paths and values and insert them into typed paths, dynamic paths or shared data. + /// Serialized paths could be unsorted, so we will have to sort all paths that will be inserted into shared data. + std::vector> paths_and_values_for_shared_data; + auto num_paths = unalignedLoad(pos); + pos += sizeof(size_t); + for (size_t i = 0; i != num_paths; ++i) + { + auto path_size = unalignedLoad(pos); + pos += sizeof(size_t); + std::string_view path(pos, path_size); + pos += path_size; + /// Check if it's a typed path. In this case we should use + /// deserializeAndInsertFromArena of corresponding column. + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + pos = typed_it->second->deserializeAndInsertFromArena(pos); + } + /// If it's not a typed path, deserialize binary value and try to insert it + /// to dynamic paths or shared data. + else + { + auto value_size = unalignedLoad(pos); + pos += sizeof(size_t); + std::string_view value(pos, value_size); + pos += value_size; + /// Check if we have this path in dynamic paths. + if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + ReadBufferFromMemory buf(value.data(), value.size()); + getDynamicSerialization()->deserializeBinary(*dynamic_it->second, buf, getFormatSettings()); + } + /// Try to add a new dynamic path. + else if (auto * dynamic_path_column = tryToAddNewDynamicPath(path)) + { + ReadBufferFromMemory buf(value.data(), value.size()); + getDynamicSerialization()->deserializeBinary(*dynamic_path_column, buf, getFormatSettings()); + } + /// Limit on dynamic paths is reached, add this path to shared data later. + else + { + paths_and_values_for_shared_data.emplace_back(path, value); + } + } + } + + /// Sort and insert all paths from paths_and_values_for_shared_data into shared data. 
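Aside: the arena format written and read above is a count followed by length-prefixed path/value pairs (typed paths delegate the value bytes to their nested column instead of length-prefixing them). A self-contained round trip of just that framing, assuming std::string as a stand-in for the arena and length-prefixing every value for simplicity:

#include <cstring>
#include <string>
#include <utility>
#include <vector>

/// Append [size_t length][bytes] to the buffer.
static void writeLengthPrefixed(std::string & buf, const std::string & bytes)
{
    size_t size = bytes.size();
    buf.append(reinterpret_cast<const char *>(&size), sizeof(size));
    buf.append(bytes);
}

/// Read [size_t length][bytes], advancing pos.
static std::string readLengthPrefixed(const char *& pos)
{
    size_t size;
    std::memcpy(&size, pos, sizeof(size));
    pos += sizeof(size);
    std::string bytes(pos, size);
    pos += size;
    return bytes;
}

/// Encode one row as [num_paths] then [path][value] pairs.
std::string encodeRow(const std::vector<std::pair<std::string, std::string>> & row)
{
    std::string buf;
    size_t num_paths = row.size();
    buf.append(reinterpret_cast<const char *>(&num_paths), sizeof(num_paths));
    for (const auto & [path, value] : row)
    {
        writeLengthPrefixed(buf, path);
        writeLengthPrefixed(buf, value);
    }
    return buf;
}

/// Decode one row written by encodeRow, advancing pos past it.
std::vector<std::pair<std::string, std::string>> decodeRow(const char *& pos)
{
    size_t num_paths;
    std::memcpy(&num_paths, pos, sizeof(num_paths));
    pos += sizeof(num_paths);
    std::vector<std::pair<std::string, std::string>> row;
    row.reserve(num_paths);
    for (size_t i = 0; i != num_paths; ++i)
    {
        auto path = readLengthPrefixed(pos);
        auto value = readLengthPrefixed(pos);
        row.emplace_back(std::move(path), std::move(value));
    }
    return row;
}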
+ std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + const auto [shared_data_paths, shared_data_values] = getSharedDataPathsAndValues(); + for (const auto & [path, value] : paths_and_values_for_shared_data) + { + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value.data(), value.size()); + } + + getSharedDataOffsets().push_back(shared_data_paths->size()); + + /// Insert default value in all remaining typed and dynamic paths. + + for (auto & [_, column] : typed_paths) + { + if (column->size() == current_size) + column->insertDefault(); + } + + for (auto & [_, column] : dynamic_paths_ptrs) + { + if (column->size() == current_size) + column->insertDefault(); + } + + return pos; +} + +const char * ColumnObject::skipSerializedInArena(const char * pos) const +{ + auto num_paths = unalignedLoad(pos); + pos += sizeof(size_t); + for (size_t i = 0; i != num_paths; ++i) + { + auto path_size = unalignedLoad(pos); + pos += sizeof(size_t); + std::string_view path(pos, path_size); + pos += path_size; + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + pos = typed_it->second->skipSerializedInArena(pos); + } + else + { + auto value_size = unalignedLoad(pos); + pos += sizeof(size_t) + value_size; + } + } + + return pos; +} + +void ColumnObject::updateHashWithValue(size_t n, SipHash & hash) const +{ + for (const auto & [_, column] : typed_paths) + column->updateHashWithValue(n, hash); + for (const auto & [_, column] : dynamic_paths_ptrs) + column->updateHashWithValue(n, hash); + shared_data->updateHashWithValue(n, hash); +} + +WeakHash32 ColumnObject::getWeakHash32() const +{ + WeakHash32 hash(size()); + for (const auto & [_, column] : typed_paths) + hash.update(column->getWeakHash32()); + for (const auto & [_, column] : dynamic_paths_ptrs) + hash.update(column->getWeakHash32()); + hash.update(shared_data->getWeakHash32()); + return hash; +} + +void ColumnObject::updateHashFast(SipHash & hash) const +{ + for (const auto & [_, column] : typed_paths) + column->updateHashFast(hash); + for (const auto & [_, column] : dynamic_paths_ptrs) + column->updateHashFast(hash); + shared_data->updateHashFast(hash); +} + +ColumnPtr ColumnObject::filter(const Filter & filt, ssize_t result_size_hint) const +{ + std::unordered_map filtered_typed_paths; + filtered_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + filtered_typed_paths[path] = column->filter(filt, result_size_hint); + + std::unordered_map filtered_dynamic_paths; + filtered_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) + filtered_dynamic_paths[path] = column->filter(filt, result_size_hint); + + auto filtered_shared_data = shared_data->filter(filt, result_size_hint); + return ColumnObject::create(filtered_typed_paths, filtered_dynamic_paths, filtered_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); +} + +void ColumnObject::expand(const Filter & mask, bool inverted) +{ + for (auto & [_, column] : typed_paths) + column->expand(mask, inverted); + for (auto & [_, column] : dynamic_paths_ptrs) + column->expand(mask, inverted); + shared_data->expand(mask, inverted); +} + +ColumnPtr ColumnObject::permute(const Permutation & perm, size_t limit) const +{ + std::unordered_map permuted_typed_paths; + permuted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + permuted_typed_paths[path] = 
column->permute(perm, limit); + + std::unordered_map permuted_dynamic_paths; + permuted_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) + permuted_dynamic_paths[path] = column->permute(perm, limit); + + auto permuted_shared_data = shared_data->permute(perm, limit); + return ColumnObject::create(permuted_typed_paths, permuted_dynamic_paths, permuted_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } ColumnPtr ColumnObject::index(const IColumn & indexes, size_t limit) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.index(indexes, limit); }); + std::unordered_map indexed_typed_paths; + indexed_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + indexed_typed_paths[path] = column->index(indexes, limit); + + std::unordered_map indexed_dynamic_paths; + indexed_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) + indexed_dynamic_paths[path] = column->index(indexes, limit); + + auto indexed_shared_data = shared_data->index(indexes, limit); + return ColumnObject::create(indexed_typed_paths, indexed_dynamic_paths, indexed_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } -ColumnPtr ColumnObject::replicate(const Offsets & offsets) const +ColumnPtr ColumnObject::replicate(const Offsets & replicate_offsets) const { - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.replicate(offsets); }); + std::unordered_map replicated_typed_paths; + replicated_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, column] : typed_paths) + replicated_typed_paths[path] = column->replicate(replicate_offsets); + + std::unordered_map replicated_dynamic_paths; + replicated_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) + replicated_dynamic_paths[path] = column->replicate(replicate_offsets); + + auto replicated_shared_data = shared_data->replicate(replicate_offsets); + return ColumnObject::create(replicated_typed_paths, replicated_dynamic_paths, replicated_shared_data, max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types); } -MutableColumnPtr ColumnObject::cloneResized(size_t new_size) const +MutableColumns ColumnObject::scatter(ColumnIndex num_columns, const Selector & selector) const { - if (new_size == 0) - return ColumnObject::create(is_nullable); + std::vector> scattered_typed_paths(num_columns); + for (auto & typed_paths_ : scattered_typed_paths) + typed_paths_.reserve(typed_paths.size()); - return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.cloneResized(new_size); }); + for (const auto & [path, column] : typed_paths) + { + auto scattered_columns = column->scatter(num_columns, selector); + for (size_t i = 0; i != num_columns; ++i) + scattered_typed_paths[i][path] = std::move(scattered_columns[i]); + } + + std::vector> scattered_dynamic_paths(num_columns); + for (auto & dynamic_paths_ : scattered_dynamic_paths) + dynamic_paths_.reserve(dynamic_paths_ptrs.size()); + + for (const auto & [path, column] : dynamic_paths_ptrs) + { + auto scattered_columns = column->scatter(num_columns, selector); + for (size_t i = 0; i != num_columns; ++i) + scattered_dynamic_paths[i][path] = std::move(scattered_columns[i]); + } + + auto scattered_shared_data_columns = shared_data->scatter(num_columns, selector); + MutableColumns result_columns; + 
result_columns.reserve(num_columns); + for (size_t i = 0; i != num_columns; ++i) + result_columns.emplace_back(ColumnObject::create(std::move(scattered_typed_paths[i]), std::move(scattered_dynamic_paths[i]), std::move(scattered_shared_data_columns[i]), max_dynamic_paths, global_max_dynamic_paths, max_dynamic_types)); + return result_columns; } void ColumnObject::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const { - res.resize(num_rows); + /// Values in ColumnObject are not comparable. + res.resize(size()); iota(res.data(), res.size(), size_t(0)); } -void ColumnObject::getExtremes(Field & min, Field & max) const +void ColumnObject::reserve(size_t n) { - if (num_rows == 0) + for (auto & [_, column] : typed_paths) + column->reserve(n); + for (auto & [_, column] : dynamic_paths_ptrs) + column->reserve(n); + shared_data->reserve(n); +} + +size_t ColumnObject::capacity() const +{ + return shared_data->capacity(); +} + +void ColumnObject::ensureOwnership() +{ + for (auto & [_, column] : typed_paths) + column->ensureOwnership(); + for (auto & [_, column] : dynamic_paths_ptrs) + column->ensureOwnership(); + shared_data->ensureOwnership(); +} + +size_t ColumnObject::byteSize() const +{ + size_t size = 0; + for (const auto & [_, column] : typed_paths) + size += column->byteSize(); + for (const auto & [_, column] : dynamic_paths_ptrs) + size += column->byteSize(); + size += shared_data->byteSize(); + return size; +} + +size_t ColumnObject::byteSizeAt(size_t n) const +{ + size_t size = 0; + for (const auto & [_, column] : typed_paths) + size += column->byteSizeAt(n); + for (const auto & [_, column] : dynamic_paths_ptrs) + size += column->byteSizeAt(n); + size += shared_data->byteSizeAt(n); + return size; +} + +size_t ColumnObject::allocatedBytes() const +{ + size_t size = 0; + for (const auto & [_, column] : typed_paths) + size += column->allocatedBytes(); + for (const auto & [_, column] : dynamic_paths_ptrs) + size += column->allocatedBytes(); + size += shared_data->allocatedBytes(); + return size; +} + +void ColumnObject::protect() +{ + for (auto & [_, column] : typed_paths) + column->protect(); + for (auto & [_, column] : dynamic_paths_ptrs) + column->protect(); + shared_data->protect(); +} + +void ColumnObject::forEachSubcolumn(DB::IColumn::MutableColumnCallback callback) +{ + for (auto & [_, column] : typed_paths) + callback(column); + for (auto & [path, column] : dynamic_paths) + { + callback(column); + dynamic_paths_ptrs[path] = assert_cast(column.get()); + } + callback(shared_data); +} + +void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColumnCallback callback) +{ + for (auto & [_, column] : typed_paths) + { + callback(*column); + column->forEachSubcolumnRecursively(callback); + } + for (auto & [path, column] : dynamic_paths) + { + callback(*column); + column->forEachSubcolumnRecursively(callback); + dynamic_paths_ptrs[path] = assert_cast(column.get()); + } + callback(*shared_data); + shared_data->forEachSubcolumnRecursively(callback); +} + +bool ColumnObject::structureEquals(const IColumn & rhs) const +{ + /// 2 Object columns have equal structure if they have the same typed paths and max_dynamic_paths/max_dynamic_types. 
+ const auto * rhs_object = typeid_cast(&rhs); + if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || max_dynamic_paths != rhs_object->max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types) + return false; + + for (const auto & [path, column] : typed_paths) + { + auto it = rhs_object->typed_paths.find(path); + if (it == rhs_object->typed_paths.end() || !it->second->structureEquals(*column)) + return false; + } + + return true; +} + +ColumnPtr ColumnObject::compress() const +{ + std::unordered_map compressed_typed_paths; + compressed_typed_paths.reserve(typed_paths.size()); + size_t byte_size = 0; + for (const auto & [path, column] : typed_paths) + { + auto compressed_column = column->compress(); + byte_size += compressed_column->byteSize(); + compressed_typed_paths[path] = std::move(compressed_column); + } + + std::unordered_map compressed_dynamic_paths; + compressed_dynamic_paths.reserve(dynamic_paths_ptrs.size()); + for (const auto & [path, column] : dynamic_paths_ptrs) + { + auto compressed_column = column->compress(); + byte_size += compressed_column->byteSize(); + compressed_dynamic_paths[path] = std::move(compressed_column); + } + + auto compressed_shared_data = shared_data->compress(); + byte_size += compressed_shared_data->byteSize(); + + auto decompress = + [my_compressed_typed_paths = std::move(compressed_typed_paths), + my_compressed_dynamic_paths = std::move(compressed_dynamic_paths), + my_compressed_shared_data = std::move(compressed_shared_data), + my_max_dynamic_paths = max_dynamic_paths, + my_global_max_dynamic_paths = global_max_dynamic_paths, + my_max_dynamic_types = max_dynamic_types, + my_statistics = statistics]() mutable + { + std::unordered_map decompressed_typed_paths; + decompressed_typed_paths.reserve(my_compressed_typed_paths.size()); + for (const auto & [path, column] : my_compressed_typed_paths) + decompressed_typed_paths[path] = column->decompress(); + + std::unordered_map decompressed_dynamic_paths; + decompressed_dynamic_paths.reserve(my_compressed_dynamic_paths.size()); + for (const auto & [path, column] : my_compressed_dynamic_paths) + decompressed_dynamic_paths[path] = column->decompress(); + + auto decompressed_shared_data = my_compressed_shared_data->decompress(); + return ColumnObject::create(decompressed_typed_paths, decompressed_dynamic_paths, decompressed_shared_data, my_max_dynamic_paths, my_global_max_dynamic_paths, my_max_dynamic_types, my_statistics); + }; + + return ColumnCompressed::create(size(), byte_size, decompress); +} + +void ColumnObject::finalize() +{ + for (auto & [_, column] : typed_paths) + column->finalize(); + for (auto & [_, column] : dynamic_paths_ptrs) + column->finalize(); + shared_data->finalize(); +} + +bool ColumnObject::isFinalized() const +{ + bool finalized = true; + for (const auto & [_, column] : typed_paths) + finalized &= column->isFinalized(); + for (const auto & [_, column] : dynamic_paths_ptrs) + finalized &= column->isFinalized(); + finalized &= shared_data->isFinalized(); + return finalized; +} + +void ColumnObject::getExtremes(DB::Field & min, DB::Field & max) const +{ + if (empty()) { min = Object(); max = Object(); @@ -884,227 +1144,311 @@ void ColumnObject::getExtremes(Field & min, Field & max) const } } -const ColumnObject::Subcolumn & ColumnObject::getSubcolumn(const PathInData & key) const +void ColumnObject::prepareForSquashing(const std::vector & source_columns) { - if (const auto * node = subcolumns.findLeaf(key)) - return node->data; + if 
(source_columns.empty()) + return; - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObject", key.getPath()); -} + /// Dynamic paths of source Object columns may differ. + /// We want to preallocate memory for all dynamic paths we will have after squashing. + /// It may happen that the total number of dynamic paths in source columns will + /// exceed the limit, in this case we will choose the most frequent paths. + std::unordered_map path_to_total_number_of_non_null_values; -ColumnObject::Subcolumn & ColumnObject::getSubcolumn(const PathInData & key) -{ - if (const auto * node = subcolumns.findLeaf(key)) - return const_cast(node)->data; - - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObject", key.getPath()); -} - -bool ColumnObject::hasSubcolumn(const PathInData & key) const -{ - return subcolumns.findLeaf(key) != nullptr; -} - -void ColumnObject::addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn) -{ - size_t new_size = subcolumn->size(); - bool inserted = subcolumns.add(key, Subcolumn(std::move(subcolumn), is_nullable)); - - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); - - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Size of subcolumn {} ({}) is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); -} - -void ColumnObject::addSubcolumn(const PathInData & key, size_t new_size) -{ - bool inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); - - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Required size of subcolumn {} ({}) is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); -} - -void ColumnObject::addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size) -{ - if (!key.hasNested()) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Cannot add Nested subcolumn, because path doesn't contain Nested"); - - bool inserted = false; - /// We find node that represents the same Nested type as @key. - const auto * nested_node = subcolumns.findBestMatch(key); - - if (nested_node) + auto add_dynamic_paths = [&](const ColumnObject & source_object) { - /// Find any leaf of Nested subcolumn. - const auto * leaf = Subcolumns::findLeaf(nested_node, [&](const auto &) { return true; }); - assert(leaf); + for (const auto & [path, dynamic_column_ptr] : source_object.dynamic_paths_ptrs) + { + auto it = path_to_total_number_of_non_null_values.find(path); + if (it == path_to_total_number_of_non_null_values.end()) + it = path_to_total_number_of_non_null_values.emplace(path, 0).first; + it->second += (dynamic_column_ptr->size() - dynamic_column_ptr->getNumberOfDefaultRows()); + } + }; - /// Recreate subcolumn with default values and the same sizes of arrays. - auto new_subcolumn = leaf->data.recreateWithDefaultValues(field_info); + for (const auto & source_column : source_columns) + add_dynamic_paths(assert_cast(*source_column)); - /// It's possible that we have already inserted value from current row - /// to this subcolumn. So, adjust size to expected. 
- if (new_subcolumn.size() > new_size) - new_subcolumn.popBack(new_subcolumn.size() - new_size); + /// Add dynamic paths from this object column. + add_dynamic_paths(*this); - assert(new_subcolumn.size() == new_size); - inserted = subcolumns.add(key, new_subcolumn); + /// Check if the number of all dynamic paths exceeds the limit. + if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) + { + /// We want to keep the most frequent paths in the resulting object column. + /// Sort paths by total number of non null values. + /// Don't include paths from current column as we cannot change them. + std::vector> paths_with_sizes; + paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size() - dynamic_paths.size()); + for (const auto & [path, size] : path_to_total_number_of_non_null_values) + { + if (!dynamic_paths.contains(path)) + paths_with_sizes.emplace_back(size, path); + } + std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater()); + + /// Fill dynamic_paths with first paths in sorted list until we reach the limit. + size_t paths_to_add = max_dynamic_paths - dynamic_paths.size(); + for (size_t i = 0; i != paths_to_add; ++i) + addNewDynamicPath(paths_with_sizes[i].second); } + /// Otherwise keep all paths. else { - /// If node was not found just add subcolumn with empty arrays. - inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); + /// Create columns for new dynamic paths. + for (const auto & [path, _] : path_to_total_number_of_non_null_values) + { + if (!dynamic_paths.contains(path)) + addNewDynamicPath(path); + } } - if (!inserted) - throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + /// Now current object column has all resulting dynamic paths and we can call + /// prepareForSquashing on them to preallocate the memory. + /// Also we can preallocate memory for dynamic paths and shared data. 
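Aside: the selection rule used in prepareForSquashing reduces to: count non-null values per candidate path, keep the paths that are already dynamic, and hand the remaining free slots to the most frequent newcomers. A compact standalone version of that choice, with hypothetical parameter names and std containers only:

#include <algorithm>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

/// Given non-null counts per candidate path, the paths that are already dynamic,
/// and the limit, return the new paths that should also become dynamic.
std::vector<std::string> chooseNewDynamicPaths(
    const std::unordered_map<std::string, size_t> & non_null_counts,
    const std::unordered_set<std::string> & existing_dynamic_paths,
    size_t max_dynamic_paths)
{
    std::vector<std::pair<size_t, std::string>> candidates;
    for (const auto & [path, count] : non_null_counts)
        if (!existing_dynamic_paths.contains(path))
            candidates.emplace_back(count, path);

    /// Most frequent first; ties broken by path name for determinism.
    std::sort(candidates.begin(), candidates.end(), std::greater<>());

    size_t free_slots = max_dynamic_paths > existing_dynamic_paths.size()
        ? max_dynamic_paths - existing_dynamic_paths.size()
        : 0;

    std::vector<std::string> result;
    result.reserve(std::min(free_slots, candidates.size()));
    for (size_t i = 0; i < candidates.size() && i < free_slots; ++i)
        result.push_back(candidates[i].second);
    return result;
}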
+ Columns shared_data_source_columns; + shared_data_source_columns.reserve(source_columns.size()); + std::unordered_map typed_paths_source_columns; + typed_paths_source_columns.reserve(typed_paths.size()); + std::unordered_map dynamic_paths_source_columns; + dynamic_paths_source_columns.reserve(dynamic_paths.size()); - if (num_rows == 0) - num_rows = new_size; - else if (new_size != num_rows) - throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, - "Required size of subcolumn {} ({}) is inconsistent with column size ({})", - key.getPath(), new_size, num_rows); + for (const auto & [path, column] : typed_paths) + typed_paths_source_columns[path].reserve(source_columns.size()); + + for (const auto & [path, column] : dynamic_paths) + dynamic_paths_source_columns[path].reserve(source_columns.size()); + + size_t total_size = 0; + for (const auto & source_column : source_columns) + { + const auto & source_object_column = assert_cast(*source_column); + total_size += source_object_column.size(); + shared_data_source_columns.push_back(source_object_column.shared_data); + + for (const auto & [path, column] : source_object_column.typed_paths) + typed_paths_source_columns.at(path).push_back(column); + + for (const auto & [path, column] : source_object_column.dynamic_paths) + { + if (dynamic_paths.contains(path)) + dynamic_paths_source_columns.at(path).push_back(column); + } + } + + shared_data->prepareForSquashing(shared_data_source_columns); + + for (const auto & [path, source_typed_columns] : typed_paths_source_columns) + typed_paths[path]->prepareForSquashing(source_typed_columns); + + for (const auto & [path, source_dynamic_columns] : dynamic_paths_source_columns) + { + /// ColumnDynamic::prepareForSquashing may not preallocate enough memory for discriminators and offsets + /// because source columns may not have this dynamic path (and so dynamic columns filled with nulls). + /// For this reason we first call ColumnDynamic::reserve with resulting size to preallocate memory for + /// discriminators and offsets and ColumnDynamic::prepareVariantsForSquashing to preallocate memory + /// for all variants inside Dynamic. + dynamic_paths_ptrs[path]->reserve(total_size); + dynamic_paths_ptrs[path]->prepareVariantsForSquashing(source_dynamic_columns); + } } -const ColumnObject::Subcolumns::Node * ColumnObject::getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const +void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { - if (!entry->path.hasNested()) - return nullptr; + if (!empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "takeDynamicStructureFromSourceColumns should be called only on empty Object column"); - size_t old_size = entry->data.size(); - const auto * current_node = subcolumns.findLeaf(entry->path); - const Subcolumns::Node * leaf = nullptr; + /// During serialization of Object column in MergeTree all Object columns + /// in single part must have the same structure (the same dynamic paths). During merge + /// resulting column is constructed by inserting from source columns, + /// but it may happen that resulting column doesn't have rows from all source parts + /// but only from subset of them, and as a result some dynamic paths could be missing + /// and structures of resulting column may differ. + /// To solve this problem, before merge we create empty resulting column and use this method + /// to take dynamic structure from all source columns even if we won't insert + /// rows from some of them. 
- while (current_node)
+ /// We want to construct resulting set of dynamic paths with paths that have least number of null values in source columns
+ /// and insert the rest paths into shared data if we exceed the limit of dynamic paths.
+ /// First, collect all dynamic paths from all source columns and calculate total number of non-null values.
+ std::unordered_map<String, size_t> path_to_total_number_of_non_null_values;
+ for (const auto & source_column : source_columns)
{
- /// Try to find the first Nested up to the current node.
- const auto * node_nested = Subcolumns::findParent(current_node,
- [](const auto & candidate) { return candidate.isNested(); });
-
- if (!node_nested)
- break;
-
- /// Find the leaf with subcolumn that contains values
- /// for the last rows.
- /// If there are no leaves, skip current node and find
- /// the next node up to the current.
- leaf = Subcolumns::findLeaf(node_nested,
- [&](const auto & candidate)
+ const auto & source_object = assert_cast<const ColumnObject &>(*source_column);
+ /// During deserialization from MergeTree we will have statistics from the whole
+ /// data part with number of non null values for each dynamic path.
+ const auto & source_statistics = source_object.getStatistics();
+ for (const auto & [path, column_ptr] : source_object.dynamic_paths_ptrs)
+ {
+ auto it = path_to_total_number_of_non_null_values.find(path);
+ if (it == path_to_total_number_of_non_null_values.end())
+ it = path_to_total_number_of_non_null_values.emplace(path, 0).first;
+ size_t size = column_ptr->size() - column_ptr->getNumberOfDefaultRows();
+ if (source_statistics)
{
- return candidate.data.size() > old_size;
- });
+ auto statistics_it = source_statistics->dynamic_paths_statistics.find(path);
+ if (statistics_it != source_statistics->dynamic_paths_statistics.end())
+ size = statistics_it->second;
+ }
+ it->second += size;
+ }

- if (leaf)
- break;
-
- current_node = node_nested->parent;
+ /// Add paths from shared data statistics. It can help extract frequent paths
+ /// from shared data into dynamic paths.
+ if (source_statistics)
+ {
+ for (const auto & [path, size] : source_statistics->shared_data_paths_statistics)
+ {
+ auto it = path_to_total_number_of_non_null_values.find(path);
+ if (it == path_to_total_number_of_non_null_values.end())
+ it = path_to_total_number_of_non_null_values.emplace(path, 0).first;
+ it->second += size;
+ }
+ }
}

- if (leaf && isNothing(leaf->data.getLeastCommonTypeBase()))
- return nullptr;
+ /// Reset current state.
+ dynamic_paths.clear();
+ dynamic_paths_ptrs.clear();
+ max_dynamic_paths = global_max_dynamic_paths;
+ Statistics new_statistics(Statistics::Source::MERGE);

- return leaf;
-}
-
-bool ColumnObject::tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const
-{
- const auto * leaf = getLeafOfTheSameNested(entry);
- if (!leaf)
- return false;
-
- size_t old_size = entry->data.size();
- auto field_info = entry->data.getFieldInfo();
-
- /// Cut the needed range from the found leaf
- /// and replace scalar values to the correct
- /// default values for given entry. 
- auto new_subcolumn = leaf->data - .cut(old_size, leaf->data.size() - old_size) - .recreateWithDefaultValues(field_info); - - entry->data.insertRangeFrom(new_subcolumn, 0, new_subcolumn.size()); - return true; -} - -bool ColumnObject::tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const -{ - const auto * leaf = getLeafOfTheSameNested(entry); - if (!leaf) - return false; - - auto last_field = leaf->data.getLastField(); - if (last_field.isNull()) - return false; - - size_t leaf_num_dimensions = leaf->data.getNumberOfDimensions(); - size_t entry_num_dimensions = entry->data.getNumberOfDimensions(); - - auto default_scalar = entry_num_dimensions > leaf_num_dimensions - ? createEmptyArrayField(entry_num_dimensions - leaf_num_dimensions) - : entry->data.getLeastCommonTypeBase()->getDefault(); - - auto default_field = applyVisitor(FieldVisitorReplaceScalars(default_scalar, leaf_num_dimensions), last_field); - entry->data.insert(std::move(default_field)); - return true; -} - -PathsInData ColumnObject::getKeys() const -{ - PathsInData keys; - keys.reserve(subcolumns.size()); - for (const auto & entry : subcolumns) - keys.emplace_back(entry->path); - return keys; -} - -bool ColumnObject::isFinalized() const -{ - return std::all_of(subcolumns.begin(), subcolumns.end(), - [](const auto & entry) { return entry->data.isFinalized(); }); -} - -void ColumnObject::finalize() -{ - size_t old_size = size(); - Subcolumns new_subcolumns; - for (auto && entry : subcolumns) + /// Check if the number of all dynamic paths exceeds the limit. + if (path_to_total_number_of_non_null_values.size() > max_dynamic_paths) { - const auto & least_common_type = entry->data.getLeastCommonType(); + /// Sort paths by total number of non null values. + std::vector> paths_with_sizes; + paths_with_sizes.reserve(path_to_total_number_of_non_null_values.size()); + for (const auto & [path, size] : path_to_total_number_of_non_null_values) + paths_with_sizes.emplace_back(size, path); + std::sort(paths_with_sizes.begin(), paths_with_sizes.end(), std::greater()); - /// Do not add subcolumns, which consist only from NULLs. - if (isNothing(getBaseTypeOfArray(least_common_type))) - continue; - - entry->data.finalize(); - new_subcolumns.add(entry->path, entry->data); + /// Fill dynamic_paths with first max_dynamic_paths paths in sorted list. + for (const auto & [size, path] : paths_with_sizes) + { + if (dynamic_paths.size() < max_dynamic_paths) + { + dynamic_paths.emplace(path, ColumnDynamic::create(max_dynamic_types)); + dynamic_paths_ptrs.emplace(path, assert_cast(dynamic_paths.find(path)->second.get())); + } + /// Add all remaining paths into shared data statistics until we reach its max size; + else if (new_statistics.shared_data_paths_statistics.size() < Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + { + new_statistics.shared_data_paths_statistics.emplace(path, size); + } + } + } + /// Use all dynamic paths from all source columns. + else + { + for (const auto & [path, _] : path_to_total_number_of_non_null_values) + { + dynamic_paths[path] = ColumnDynamic::create(max_dynamic_types); + dynamic_paths_ptrs[path] = assert_cast(dynamic_paths[path].get()); + } } - /// If all subcolumns were skipped add a dummy subcolumn, - /// because Tuple type must have at least one element. - if (new_subcolumns.empty()) - new_subcolumns.add(PathInData{COLUMN_NAME_DUMMY}, Subcolumn{ColumnUInt8::create(old_size, 0), is_nullable}); + /// Fill statistics for the merged part. 
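Aside: the statistics recorded for a merged part split the counted paths into two groups: the ones that became dynamic paths and, up to some cap, the ones left in shared data. A standalone sketch of that split, with ToyStatistics and the cap as assumed placeholders for the real Statistics struct and MAX_SHARED_DATA_STATISTICS_SIZE:

#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct ToyStatistics
{
    std::unordered_map<std::string, size_t> dynamic_paths_statistics;
    std::map<std::string, size_t> shared_data_paths_statistics;
};

/// Split counted paths: the most frequent ones (up to max_dynamic_paths) are recorded
/// as dynamic-path statistics, the rest as shared-data statistics up to the cap.
ToyStatistics buildMergeStatistics(
    const std::unordered_map<std::string, size_t> & non_null_counts,
    size_t max_dynamic_paths,
    size_t max_shared_data_statistics)
{
    std::vector<std::pair<size_t, std::string>> sorted;
    sorted.reserve(non_null_counts.size());
    for (const auto & [path, count] : non_null_counts)
        sorted.emplace_back(count, path);
    std::sort(sorted.begin(), sorted.end(), std::greater<>());

    ToyStatistics stats;
    for (const auto & [count, path] : sorted)
    {
        if (stats.dynamic_paths_statistics.size() < max_dynamic_paths)
            stats.dynamic_paths_statistics.emplace(path, count);
        else if (stats.shared_data_paths_statistics.size() < max_shared_data_statistics)
            stats.shared_data_paths_statistics.emplace(path, count);
    }
    return stats;
}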
+ for (const auto & [path, _] : dynamic_paths) + new_statistics.dynamic_paths_statistics[path] = path_to_total_number_of_non_null_values[path]; + statistics = std::make_shared(std::move(new_statistics)); - std::swap(subcolumns, new_subcolumns); - checkObjectHasNoAmbiguosPaths(getKeys()); + /// Set max_dynamic_paths to the number of selected dynamic paths. + /// It's needed to avoid adding new unexpected dynamic paths during inserts into this column during merge. + max_dynamic_paths = dynamic_paths.size(); + + /// Now we have the resulting set of dynamic paths that will be used in all merged columns. + /// As we use Dynamic column for dynamic paths, we should call takeDynamicStructureFromSourceColumns + /// on all resulting dynamic columns. + for (auto & [path, column] : dynamic_paths) + { + Columns dynamic_path_source_columns; + for (const auto & source_column : source_columns) + { + const auto & source_object = assert_cast(*source_column); + auto it = source_object.dynamic_paths.find(path); + if (it != source_object.dynamic_paths.end()) + dynamic_path_source_columns.push_back(it->second); + } + column->takeDynamicStructureFromSourceColumns(dynamic_path_source_columns); + } + + /// Typed paths also can contain types with dynamic structure. + for (auto & [path, column] : typed_paths) + { + Columns typed_path_source_columns; + typed_path_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + typed_path_source_columns.push_back(assert_cast(*source_column).typed_paths.at(path)); + column->takeDynamicStructureFromSourceColumns(typed_path_source_columns); + } } -void ColumnObject::updateHashFast(SipHash & hash) const +size_t ColumnObject::findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end) { - for (const auto & entry : subcolumns) - for (auto & part : entry->data.data) - part->updateHashFast(hash); + /// Simple random access iterator over values in ColumnString in specified range. + class Iterator + { + public: + using difference_type = size_t; + using value_type = StringRef; + using iterator_category = std::random_access_iterator_tag; + using pointer = StringRef*; + using reference = StringRef&; + + Iterator() = delete; + Iterator(const ColumnString * data_, size_t index_) : data(data_), index(index_) {} + Iterator(const Iterator & rhs) = default; + Iterator & operator=(const Iterator & rhs) = default; + inline Iterator& operator+=(difference_type rhs) { index += rhs; return *this;} + inline StringRef operator*() const {return data->getDataAt(index);} + + inline Iterator& operator++() { ++index; return *this; } + inline difference_type operator-(const Iterator & rhs) const {return index - rhs.index; } + + const ColumnString * data; + size_t index; + }; + + Iterator start_it(&shared_data_paths, start); + Iterator end_it(&shared_data_paths, end); + auto it = std::lower_bound(start_it, end_it, path); + return it.index; } + +void ColumnObject::fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, size_t start, size_t end) +{ + const auto & shared_data_array = assert_cast(*shared_data_column); + const auto & shared_data_offsets = shared_data_array.getOffsets(); + size_t first_offset = shared_data_offsets[static_cast(start) - 1]; + size_t last_offset = shared_data_offsets[static_cast(end) - 1]; + /// Check if we have at least one row with data. 
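Aside: findPathLowerBoundInSharedData only needs the paths inside one row's [start, end) slice to be sorted, so the custom iterator above is just a way to run std::lower_bound over a ColumnString range. An equivalent standalone lookup over a flat vector of (path, value) pairs:

#include <algorithm>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using PathValue = std::pair<std::string, std::string>;

/// Look up `path` inside the sorted slice [start, end) of one shared-data row.
/// Returns the stored value, or std::nullopt if the path is absent in that row.
std::optional<std::string> findPathInRow(
    const std::vector<PathValue> & shared_data, size_t start, size_t end, const std::string & path)
{
    auto first = shared_data.begin() + start;
    auto last = shared_data.begin() + end;
    auto it = std::lower_bound(first, last, path,
        [](const PathValue & entry, const std::string & needle) { return entry.first < needle; });
    if (it != last && it->first == path)
        return it->second;
    return std::nullopt;
}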
+ if (first_offset == last_offset) + { + path_column.insertManyDefaults(end - start); + return; + } + + const auto & shared_data_tuple = assert_cast(shared_data_array.getData()); + const auto & shared_data_paths = assert_cast(shared_data_tuple.getColumn(0)); + const auto & shared_data_values = assert_cast(shared_data_tuple.getColumn(1)); + const auto & dynamic_serialization = getDynamicSerialization(); + for (size_t i = start; i != end; ++i) + { + size_t paths_start = shared_data_offsets[static_cast(i) - 1]; + size_t paths_end = shared_data_offsets[static_cast(i)]; + auto lower_bound_path_index = ColumnObject::findPathLowerBoundInSharedData(path, shared_data_paths, paths_start, paths_end); + if (lower_bound_path_index != paths_end && shared_data_paths.getDataAt(lower_bound_path_index) == path) + { + auto value_data = shared_data_values.getDataAt(lower_bound_path_index); + ReadBufferFromMemory buf(value_data.data, value_data.size); + dynamic_serialization->deserializeBinary(path_column, buf, getFormatSettings()); + } + else + { + path_column.insertDefault(); + } + } +} + } diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index 25cfaa550f6..f530ed29ef3 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -1,216 +1,117 @@ #pragma once #include -#include -#include -#include -#include -#include +#include +#include +#include +#include #include +#include +#include +#include +#include namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - -/// Info that represents a scalar or array field in a decomposed view. -/// It allows to recreate field with different number -/// of dimensions or nullability. -struct FieldInfo -{ - /// The common type of of all scalars in field. - DataTypePtr scalar_type; - - /// Do we have NULL scalar in field. - bool have_nulls; - - /// If true then we have scalars with different types in array and - /// we need to convert scalars to the common type. - bool need_convert; - - /// Number of dimension in array. 0 if field is scalar. - size_t num_dimensions; - - /// If true then this field is an array of variadic dimension field - /// and we need to normalize the dimension - bool need_fold_dimension; -}; - -FieldInfo getFieldInfo(const Field & field); - -/** A column that represents object with dynamic set of subcolumns. - * Subcolumns are identified by paths in document and are stored in - * a trie-like structure. ColumnObject is not suitable for writing into tables - * and it should be converted to Tuple with fixed set of subcolumns before that. - */ class ColumnObject final : public COWHelper, ColumnObject> { public: - /** Class that represents one subcolumn. - * It stores values in several parts of column - * and keeps current common type of all parts. - * We add a new column part with a new type, when we insert a field, - * which can't be converted to the current common type. - * After insertion of all values subcolumn should be finalized - * for writing and other operations. 
- */ - class Subcolumn + struct Statistics { - public: - Subcolumn() = default; - Subcolumn(size_t size_, bool is_nullable_); - Subcolumn(MutableColumnPtr && data_, bool is_nullable_); - - size_t size() const; - size_t byteSize() const; - size_t allocatedBytes() const; - void get(size_t n, Field & res) const; - - bool isFinalized() const; - const DataTypePtr & getLeastCommonType() const { return least_common_type.get(); } - const DataTypePtr & getLeastCommonTypeBase() const { return least_common_type.getBase(); } - size_t getNumberOfDimensions() const { return least_common_type.getNumberOfDimensions(); } - - /// Checks the consistency of column's parts stored in @data. - void checkTypes() const; - - /// Inserts a field, which scalars can be arbitrary, but number of - /// dimensions should be consistent with current common type. - void insert(Field field); - void insert(Field field, FieldInfo info); - - void insertDefault(); - void insertManyDefaults(size_t length); - void insertRangeFrom(const Subcolumn & src, size_t start, size_t length); - void popBack(size_t n); - - Subcolumn cut(size_t start, size_t length) const; - - /// Converts all column's parts to the common type and - /// creates a single column that stores all values. - void finalize(); - - /// Returns last inserted field. - Field getLastField() const; - - FieldInfo getFieldInfo() const; - - /// Recreates subcolumn with default scalar values and keeps sizes of arrays. - /// Used to create columns of type Nested with consistent array sizes. - Subcolumn recreateWithDefaultValues(const FieldInfo & field_info) const; - - /// Returns single column if subcolumn in finalizes. - /// Otherwise -- undefined behaviour. - IColumn & getFinalizedColumn(); - const IColumn & getFinalizedColumn() const; - const ColumnPtr & getFinalizedColumnPtr() const; - - const std::vector & getData() const { return data; } - size_t getNumberOfDefaultsInPrefix() const { return num_of_defaults_in_prefix; } - - friend class ColumnObject; - - private: - class LeastCommonType + enum class Source { - public: - LeastCommonType(); - explicit LeastCommonType(DataTypePtr type_); - - const DataTypePtr & get() const { return type; } - const DataTypePtr & getBase() const { return base_type; } - size_t getNumberOfDimensions() const { return num_dimensions; } - - private: - DataTypePtr type; - DataTypePtr base_type; - size_t num_dimensions = 0; + READ, /// Statistics were loaded into column during reading from MergeTree. + MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; - void addNewColumnPart(DataTypePtr type); + explicit Statistics(Source source_) : source(source_) {} - /// Current least common type of all values inserted to this subcolumn. - LeastCommonType least_common_type; - - /// If true then common type type of subcolumn is Nullable - /// and default values are NULLs. - bool is_nullable = false; - - /// Parts of column. Parts should be in increasing order in terms of subtypes/supertypes. - /// That means that the least common type for i-th prefix is the type of i-th part - /// and it's the supertype for all type of column from 0 to i-1. - std::vector data; - - /// Until we insert any non-default field we don't know further - /// least common type and we count number of defaults in prefix, - /// which will be converted to the default type of final common type. - size_t num_of_defaults_in_prefix = 0; - - size_t num_rows = 0; + /// Source of the statistics. 
+ Source source; + /// Statistics for dynamic paths: (path) -> (total number of not-null values). + std::unordered_map dynamic_paths_statistics; + /// Statistics for paths in shared data: path) -> (total number of not-null values). + /// We don't store statistics for all paths in shared data but only for some subset of them + /// (is 10000 a good limit? It should not be expensive to store 10000 paths per part) + static const size_t MAX_SHARED_DATA_STATISTICS_SIZE = 10000; + std::unordered_map shared_data_paths_statistics; }; - using Subcolumns = SubcolumnsTree; + using StatisticsPtr = std::shared_ptr; private: - /// If true then all subcolumns are nullable. - const bool is_nullable; + friend class COWHelper, ColumnObject>; - Subcolumns subcolumns; - size_t num_rows; + ColumnObject(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_); + ColumnObject( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const StatisticsPtr & statistics_ = {}); + /// Use StringHashForHeterogeneousLookup hash for hash maps to be able to use std::string_view in find() method. + using PathToColumnMap = std::unordered_map; + using PathToDynamicColumnPtrMap = std::unordered_map; public: - static constexpr auto COLUMN_NAME_DUMMY = "_dummy"; + /** Create immutable column using immutable arguments. This arguments may be shared with other columns. + * Use mutate in order to make mutable column and mutate shared nested columns. + */ + using Base = COWHelper, ColumnObject>; - explicit ColumnObject(bool is_nullable_); - ColumnObject(Subcolumns && subcolumns_, bool is_nullable_); + static Ptr create( + const std::unordered_map & typed_paths_, + const std::unordered_map & dynamic_paths_, + const ColumnPtr & shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const StatisticsPtr & statistics_ = {}); - /// Checks that all subcolumns have consistent sizes. - void checkConsistency() const; + static MutablePtr create( + std::unordered_map typed_paths_, + std::unordered_map dynamic_paths_, + MutableColumnPtr shared_data_, + size_t max_dynamic_paths_, + size_t global_max_dynamic_paths_, + size_t max_dynamic_types_, + const StatisticsPtr & statistics_ = {}); - bool hasSubcolumn(const PathInData & key) const; + static MutablePtr create(std::unordered_map typed_paths_, size_t max_dynamic_paths_, size_t max_dynamic_types_); - const Subcolumn & getSubcolumn(const PathInData & key) const; - Subcolumn & getSubcolumn(const PathInData & key); + std::string getName() const override; - void incrementNumRows() { ++num_rows; } + const char * getFamilyName() const override + { + return "Object"; + } - /// Adds a subcolumn from existing IColumn. - void addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn); + TypeIndex getDataType() const override + { + return TypeIndex::Object; + } - /// Adds a subcolumn of specific size with default values. - void addSubcolumn(const PathInData & key, size_t new_size); + MutableColumnPtr cloneEmpty() const override; + MutableColumnPtr cloneResized(size_t size) const override; - /// Adds a subcolumn of type Nested of specific size with default values. - /// It cares about consistency of sizes of Nested arrays. 
- void addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size); + size_t size() const override + { + return shared_data->size(); + } - /// Finds a subcolumn from the same Nested type as @entry and inserts - /// an array with default values with consistent sizes as in Nested type. - bool tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const; - bool tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const; + Field operator[](size_t n) const override; + void get(size_t n, Field & res) const override; - const Subcolumns & getSubcolumns() const { return subcolumns; } - Subcolumns & getSubcolumns() { return subcolumns; } - PathsInData getKeys() const; - - /// Part of interface - - const char * getFamilyName() const override { return "Object"; } - TypeIndex getDataType() const override { return TypeIndex::Object; } - - size_t size() const override; - size_t byteSize() const override; - size_t allocatedBytes() const override; - void forEachSubcolumn(MutableColumnCallback callback) override; - void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; - void insert(const Field & field) override; - bool tryInsert(const Field & field) override; - void insertDefault() override; + bool isDefaultAt(size_t n) const override; + StringRef getDataAt(size_t n) const override; + void insertData(const char * pos, size_t length) override; + void insert(const Field & x) override; + bool tryInsert(const Field & x) override; #if !defined(DEBUG_OR_SANITIZER_BUILD) void insertFrom(const IColumn & src, size_t n) override; void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; @@ -218,24 +119,31 @@ public: void doInsertFrom(const IColumn & src, size_t n) override; void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; #endif + /// TODO: implement more optimal insertManyFrom + void insertDefault() override; + void insertManyDefaults(size_t length) override; - void popBack(size_t length) override; - Field operator[](size_t n) const override; - void get(size_t n, Field & res) const override; + void popBack(size_t n) override; + StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override; + const char * deserializeAndInsertFromArena(const char * pos) override; + const char * skipSerializedInArena(const char * pos) const override; + + void updateHashWithValue(size_t n, SipHash & hash) const override; + WeakHash32 getWeakHash32() const override; + void updateHashFast(SipHash & hash) const override; + + ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override; + void expand(const Filter & mask, bool inverted) override; ColumnPtr permute(const Permutation & perm, size_t limit) const override; - ColumnPtr filter(const Filter & filter, ssize_t result_size_hint) const override; ColumnPtr index(const IColumn & indexes, size_t limit) const override; - ColumnPtr replicate(const Offsets & offsets) const override; - MutableColumnPtr cloneResized(size_t new_size) const override; + ColumnPtr replicate(const Offsets & replicate_offsets) const override; + MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override; - /// Finalizes all subcolumns. - void finalize() override; - bool isFinalized() const override; - - /// Order of rows in ColumnObject is undefined. 
- void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; + void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &) const override; void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} + + /// Values of ColumnObject are not comparable. #if !defined(DEBUG_OR_SANITIZER_BUILD) int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } #else @@ -243,35 +151,118 @@ public: #endif void getExtremes(Field & min, Field & max) const override; - /// All other methods throw exception. + void reserve(size_t n) override; + size_t capacity() const override; + void prepareForSquashing(const std::vector & source_columns) override; + void ensureOwnership() override; + size_t byteSize() const override; + size_t byteSizeAt(size_t n) const override; + size_t allocatedBytes() const override; + void protect() override; - StringRef getDataAt(size_t) const override { throwMustBeConcrete(); } - bool isDefaultAt(size_t) const override { throwMustBeConcrete(); } - void insertData(const char *, size_t) override { throwMustBeConcrete(); } - StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeConcrete(); } - char * serializeValueIntoMemory(size_t, char *) const override { throwMustBeConcrete(); } - const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); } - const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); } - void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); } - WeakHash32 getWeakHash32() const override { throwMustBeConcrete(); } - void updateHashFast(SipHash & hash) const override; - void expand(const Filter &, bool) override { throwMustBeConcrete(); } - bool hasEqualValues() const override { throwMustBeConcrete(); } - size_t byteSizeAt(size_t) const override { throwMustBeConcrete(); } - double getRatioOfDefaultRows(double) const override { throwMustBeConcrete(); } - UInt64 getNumberOfDefaultRows() const override { throwMustBeConcrete(); } - void getIndicesOfNonDefaultRows(Offsets &, size_t, size_t) const override { throwMustBeConcrete(); } + void forEachSubcolumn(MutableColumnCallback callback) override; -private: - [[noreturn]] static void throwMustBeConcrete() + void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; + + bool structureEquals(const IColumn & rhs) const override; + + ColumnPtr compress() const override; + + void finalize() override; + bool isFinalized() const override; + + bool hasDynamicStructure() const override { return true; } + void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; + + const PathToColumnMap & getTypedPaths() const { return typed_paths; } + PathToColumnMap & getTypedPaths() { return typed_paths; } + + const PathToColumnMap & getDynamicPaths() const { return dynamic_paths; } + PathToColumnMap & getDynamicPaths() { return dynamic_paths; } + + const PathToDynamicColumnPtrMap & getDynamicPathsPtrs() const { return dynamic_paths_ptrs; } + PathToDynamicColumnPtrMap & getDynamicPathsPtrs() { return dynamic_paths_ptrs; } + + const StatisticsPtr & getStatistics() const { return statistics; } + + const ColumnPtr & getSharedDataPtr() const { return shared_data; } + ColumnPtr & getSharedDataPtr() { return shared_data; } + IColumn & getSharedDataColumn() { return *shared_data; } 
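+
+    /// Shared data is scanned row by row through the accessors below; for illustration
+    /// ('process' here is just a placeholder, not a member of this class):
+    ///     const auto & offsets = getSharedDataOffsets();
+    ///     auto [paths, values] = getSharedDataPathsAndValues();
+    ///     for (size_t i = offsets[static_cast<ssize_t>(row) - 1]; i != offsets[row]; ++i)
+    ///         process(paths->getDataAt(i), values->getDataAt(i));
+    /// Paths inside a single row are sorted, so findPathLowerBoundInSharedData() declared below
+    /// can binary search for a specific path.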
+
+    const ColumnArray & getSharedDataNestedColumn() const { return assert_cast<const ColumnArray &>(*shared_data); }
+    ColumnArray & getSharedDataNestedColumn() { return assert_cast<ColumnArray &>(*shared_data); }
+
+    ColumnArray::Offsets & getSharedDataOffsets() { return assert_cast<ColumnArray &>(*shared_data).getOffsets(); }
+    const ColumnArray::Offsets & getSharedDataOffsets() const { return assert_cast<const ColumnArray &>(*shared_data).getOffsets(); }
+
+    std::pair<ColumnString *, ColumnString *> getSharedDataPathsAndValues()
     {
-        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ColumnObject must be converted to ColumnTuple before use");
+        auto & column_array = assert_cast<ColumnArray &>(*shared_data);
+        auto & column_tuple = assert_cast<ColumnTuple &>(column_array.getData());
+        return {assert_cast<ColumnString *>(&column_tuple.getColumn(0)), assert_cast<ColumnString *>(&column_tuple.getColumn(1))};
     }

-    template <typename Func>
-    MutableColumnPtr applyForSubcolumns(Func && func) const;
+    std::pair<const ColumnString *, const ColumnString *> getSharedDataPathsAndValues() const
+    {
+        const auto & column_array = assert_cast<const ColumnArray &>(*shared_data);
+        const auto & column_tuple = assert_cast<const ColumnTuple &>(column_array.getData());
+        return {assert_cast<const ColumnString *>(&column_tuple.getColumn(0)), assert_cast<const ColumnString *>(&column_tuple.getColumn(1))};
+    }

-    /// It's used to get shared sized of Nested to insert correct default values.
-    const Subcolumns::Node * getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const;
+    size_t getMaxDynamicTypes() const { return max_dynamic_types; }
+    size_t getMaxDynamicPaths() const { return max_dynamic_paths; }
+    size_t getGlobalMaxDynamicPaths() const { return global_max_dynamic_paths; }
+
+    /// Try to add new dynamic path. Returns pointer to the new dynamic
+    /// path column or nullptr if limit on dynamic paths is reached.
+    ColumnDynamic * tryToAddNewDynamicPath(std::string_view path);
+    /// Throws an exception if cannot add.
+    void addNewDynamicPath(std::string_view path);
+
+    void setDynamicPaths(const std::vector<String> & paths);
+    void setMaxDynamicPaths(size_t max_dynamic_paths_);
+    void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; }
+
+    void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n);
+    void deserializeValueFromSharedData(const ColumnString * shared_data_values, size_t n, IColumn & column) const;
+
+    /// Paths in shared data are sorted in each row. Use this method to find the lower bound for specific path in the row.
+    static size_t findPathLowerBoundInSharedData(StringRef path, const ColumnString & shared_data_paths, size_t start, size_t end);
+    /// Insert all the data from shared data with specified path to dynamic column.
+    static void fillPathColumnFromSharedData(IColumn & path_column, StringRef path, const ColumnPtr & shared_data_column, size_t start, size_t end);
+
+private:
+    void insertFromSharedDataAndFillRemainingDynamicPaths(const ColumnObject & src_object_column, std::vector<String> && src_dynamic_paths_for_shared_data, size_t start, size_t length);
+    void serializePathAndValueIntoArena(Arena & arena, const char *& begin, StringRef path, StringRef value, StringRef & res) const;
+
+    /// Map path -> column for paths with explicitly specified types.
+    /// This set of paths is constant and cannot be changed.
+    PathToColumnMap typed_paths;
+    /// Map path -> column for dynamically added paths. All columns
+    /// here are Dynamic columns. This set of paths can be extended
+    /// during inserts into the column.
+    PathToColumnMap dynamic_paths;
+    /// Store and use pointers to ColumnDynamic to avoid virtual calls.
+ /// With hundreds of dynamic paths these virtual calls are noticeable. + PathToDynamicColumnPtrMap dynamic_paths_ptrs; + /// Shared storage for all other paths and values. It's filled + /// when the number of dynamic paths reaches the limit. + /// It has type Array(Tuple(String, String)) and stores + /// an array of pairs (path, binary serialized dynamic value) for each row. + WrappedPtr shared_data; + + /// Maximum number of dynamic paths. If this limit is reached, all new paths will be inserted into shared data. + /// This limit can be different for different instances of Object column. For example, we can decrease it + /// in takeDynamicStructureFromSourceColumns before merge. + size_t max_dynamic_paths; + /// Global limit on number of dynamic paths for all column instances of this Object type. It's the limit specified + /// in the type definition (for example 'JSON(max_dynamic_paths=N)'). max_dynamic_paths is always not greater than this limit. + size_t global_max_dynamic_paths; + /// Maximum number of dynamic types for each dynamic path. Used while creating Dynamic columns for new dynamic paths. + size_t max_dynamic_types; + /// Statistics on the number of non-null values for each dynamic path and for some shared data paths in the MergeTree data part. + /// Calculated during serializing of data part in MergeTree. Used to determine the set of dynamic paths for the merged part. + StatisticsPtr statistics; }; + } diff --git a/src/Columns/ColumnObjectDeprecated.cpp b/src/Columns/ColumnObjectDeprecated.cpp new file mode 100644 index 00000000000..d03b1d0df82 --- /dev/null +++ b/src/Columns/ColumnObjectDeprecated.cpp @@ -0,0 +1,1111 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int DUPLICATE_COLUMN; + extern const int EXPERIMENTAL_FEATURE_ERROR; + extern const int ILLEGAL_COLUMN; + extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; + extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; +} + +namespace +{ + +/// Recreates column with default scalar values and keeps sizes of arrays. +ColumnPtr recreateColumnWithDefaultValues( + const ColumnPtr & column, const DataTypePtr & scalar_type, size_t num_dimensions) +{ + const auto * column_array = checkAndGetColumn(column.get()); + if (column_array && num_dimensions) + { + return ColumnArray::create( + recreateColumnWithDefaultValues( + column_array->getDataPtr(), scalar_type, num_dimensions - 1), + IColumn::mutate(column_array->getOffsetsPtr())); + } + + return createArrayOfType(scalar_type, num_dimensions)->createColumn()->cloneResized(column->size()); +} + +/// Replaces NULL fields to given field or empty array. +class FieldVisitorReplaceNull : public StaticVisitor +{ +public: + explicit FieldVisitorReplaceNull( + const Field & replacement_, size_t num_dimensions_) + : replacement(replacement_) + , num_dimensions(num_dimensions_) + { + } + + Field operator()(const Null &) const + { + return num_dimensions ? 
Array() : replacement; + } + + Field operator()(const Array & x) const + { + assert(num_dimensions > 0); + const size_t size = x.size(); + Array res(size); + for (size_t i = 0; i < size; ++i) + res[i] = applyVisitor(FieldVisitorReplaceNull(replacement, num_dimensions - 1), x[i]); + return res; + } + + template + Field operator()(const T & x) const { return x; } + +private: + const Field & replacement; + size_t num_dimensions; +}; + +/// Visitor that allows to get type of scalar field +/// or least common type of scalars in array. +/// More optimized version of FieldToDataType. +class FieldVisitorToScalarType : public StaticVisitor<> +{ +public: + using FieldType = Field::Types::Which; + + void operator()(const Array & x) + { + size_t size = x.size(); + for (size_t i = 0; i < size; ++i) + applyVisitor(*this, x[i]); + } + + void operator()(const UInt64 & x) + { + field_types.insert(FieldType::UInt64); + if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt8); + else if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt16); + else if (x <= std::numeric_limits::max()) + type_indexes.insert(TypeIndex::UInt32); + else + type_indexes.insert(TypeIndex::UInt64); + } + + void operator()(const Int64 & x) + { + field_types.insert(FieldType::Int64); + if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int8); + else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int16); + else if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) + type_indexes.insert(TypeIndex::Int32); + else + type_indexes.insert(TypeIndex::Int64); + } + + void operator()(const bool &) + { + field_types.insert(FieldType::UInt64); + type_indexes.insert(TypeIndex::UInt8); + } + + void operator()(const Null &) + { + have_nulls = true; + } + + template + void operator()(const T &) + { + field_types.insert(Field::TypeToEnum>::value); + type_indexes.insert(TypeToTypeIndex>); + } + + DataTypePtr getScalarType() const { return getLeastSupertypeOrString(type_indexes); } + bool haveNulls() const { return have_nulls; } + bool needConvertField() const { return field_types.size() > 1; } + +private: + TypeIndexSet type_indexes; + std::unordered_set field_types; + bool have_nulls = false; +}; + +} + +FieldInfo getFieldInfo(const Field & field) +{ + FieldVisitorToScalarType to_scalar_type_visitor; + applyVisitor(to_scalar_type_visitor, field); + FieldVisitorToNumberOfDimensions to_number_dimension_visitor; + + return + { + to_scalar_type_visitor.getScalarType(), + to_scalar_type_visitor.haveNulls(), + to_scalar_type_visitor.needConvertField(), + applyVisitor(to_number_dimension_visitor, field), + to_number_dimension_visitor.need_fold_dimension + }; +} + +ColumnObjectDeprecated::Subcolumn::Subcolumn(MutableColumnPtr && data_, bool is_nullable_) + : least_common_type(getDataTypeByColumn(*data_)) + , is_nullable(is_nullable_) + , num_rows(data_->size()) +{ + data.push_back(std::move(data_)); +} + +ColumnObjectDeprecated::Subcolumn::Subcolumn( + size_t size_, bool is_nullable_) + : least_common_type(std::make_shared()) + , is_nullable(is_nullable_) + , num_of_defaults_in_prefix(size_) + , num_rows(size_) +{ +} + +size_t ColumnObjectDeprecated::Subcolumn::size() const +{ + return num_rows; +} + +size_t ColumnObjectDeprecated::Subcolumn::byteSize() const +{ + size_t res = 0; + for (const auto & part : data) + res += part->byteSize(); + return res; +} + +size_t 
ColumnObjectDeprecated::Subcolumn::allocatedBytes() const +{ + size_t res = 0; + for (const auto & part : data) + res += part->allocatedBytes(); + return res; +} + +void ColumnObjectDeprecated::Subcolumn::get(size_t n, Field & res) const +{ + if (isFinalized()) + { + getFinalizedColumn().get(n, res); + return; + } + + size_t ind = n; + if (ind < num_of_defaults_in_prefix) + { + res = least_common_type.get()->getDefault(); + return; + } + + ind -= num_of_defaults_in_prefix; + for (const auto & part : data) + { + if (ind < part->size()) + { + part->get(ind, res); + res = convertFieldToTypeOrThrow(res, *least_common_type.get()); + return; + } + + ind -= part->size(); + } + + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for getting field is out of range", n); +} + +void ColumnObjectDeprecated::Subcolumn::checkTypes() const +{ + DataTypes prefix_types; + prefix_types.reserve(data.size()); + for (size_t i = 0; i < data.size(); ++i) + { + auto current_type = getDataTypeByColumn(*data[i]); + prefix_types.push_back(current_type); + auto prefix_common_type = getLeastSupertype(prefix_types); + if (!prefix_common_type->equals(*current_type)) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Data type {} of column at position {} cannot represent all columns from i-th prefix", + current_type->getName(), i); + } +} + +void ColumnObjectDeprecated::Subcolumn::insert(Field field) +{ + auto info = DB::getFieldInfo(field); + insert(std::move(field), std::move(info)); +} + +void ColumnObjectDeprecated::Subcolumn::addNewColumnPart(DataTypePtr type) +{ + auto serialization = type->getSerialization(ISerialization::Kind::SPARSE); + data.push_back(type->createColumn(*serialization)); + least_common_type = LeastCommonType{std::move(type)}; +} + +static bool isConversionRequiredBetweenIntegers(const IDataType & lhs, const IDataType & rhs) +{ + /// If both of types are signed/unsigned integers and size of left field type + /// is less than right type, we don't need to convert field, + /// because all integer fields are stored in Int64/UInt64. + + WhichDataType which_lhs(lhs); + WhichDataType which_rhs(rhs); + + bool is_native_int = which_lhs.isNativeInt() && which_rhs.isNativeInt(); + bool is_native_uint = which_lhs.isNativeUInt() && which_rhs.isNativeUInt(); + + return (!is_native_int && !is_native_uint) + || lhs.getSizeOfValueInMemory() > rhs.getSizeOfValueInMemory(); +} + +void ColumnObjectDeprecated::Subcolumn::insert(Field field, FieldInfo info) +{ + auto base_type = std::move(info.scalar_type); + + if (isNothing(base_type) && info.num_dimensions == 0) + { + insertDefault(); + return; + } + + auto column_dim = least_common_type.getNumberOfDimensions(); + auto value_dim = info.num_dimensions; + + if (isNothing(least_common_type.get())) + column_dim = value_dim; + + if (isNothing(base_type)) + value_dim = column_dim; + + if (value_dim != column_dim) + throw Exception(ErrorCodes::NUMBER_OF_DIMENSIONS_MISMATCHED, + "Dimension of types mismatched between inserted value and column. " + "Dimension of value: {}. 
Dimension of column: {}", + value_dim, column_dim); + + if (is_nullable) + base_type = makeNullable(base_type); + + if (!is_nullable && info.have_nulls) + field = applyVisitor(FieldVisitorReplaceNull(base_type->getDefault(), value_dim), std::move(field)); + + bool type_changed = false; + const auto & least_common_base_type = least_common_type.getBase(); + + if (data.empty()) + { + addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); + } + else if (!least_common_base_type->equals(*base_type) && !isNothing(base_type)) + { + if (isConversionRequiredBetweenIntegers(*base_type, *least_common_base_type)) + { + base_type = getLeastSupertypeOrString(DataTypes{std::move(base_type), least_common_base_type}); + type_changed = true; + if (!least_common_base_type->equals(*base_type)) + addNewColumnPart(createArrayOfType(std::move(base_type), value_dim)); + } + } + + if (type_changed || info.need_convert) + field = convertFieldToTypeOrThrow(field, *least_common_type.get()); + + if (!data.back()->tryInsert(field)) + { + /** Normalization of the field above is pretty complicated (it uses several FieldVisitors), + * so in the case of a bug, we may get mismatched types. + * The `IColumn::insert` method does not check the type of the inserted field, and it can lead to a segmentation fault. + * Therefore, we use the safer `tryInsert` method to get an exception instead of a segmentation fault. + */ + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot insert field {} to column {}", + field.dump(), data.back()->dumpStructure()); + } + + ++num_rows; +} + +void ColumnObjectDeprecated::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t start, size_t length) +{ + assert(start + length <= src.size()); + size_t end = start + length; + num_rows += length; + + if (data.empty()) + { + addNewColumnPart(src.getLeastCommonType()); + } + else if (!least_common_type.get()->equals(*src.getLeastCommonType())) + { + auto new_least_common_type = getLeastSupertypeOrString(DataTypes{least_common_type.get(), src.getLeastCommonType()}); + if (!new_least_common_type->equals(*least_common_type.get())) + addNewColumnPart(std::move(new_least_common_type)); + } + + if (end <= src.num_of_defaults_in_prefix) + { + data.back()->insertManyDefaults(length); + return; + } + + if (start < src.num_of_defaults_in_prefix) + data.back()->insertManyDefaults(src.num_of_defaults_in_prefix - start); + + auto insert_from_part = [&](const auto & column, size_t from, size_t n) + { + assert(from + n <= column->size()); + auto column_type = getDataTypeByColumn(*column); + + if (column_type->equals(*least_common_type.get())) + { + data.back()->insertRangeFrom(*column, from, n); + return; + } + + /// If we need to insert large range, there is no sense to cut part of column and cast it. + /// Casting of all column and inserting from it can be faster. + /// Threshold is just a guess. + + if (n * 3 >= column->size()) + { + auto casted_column = castColumn({column, column_type, ""}, least_common_type.get()); + data.back()->insertRangeFrom(*casted_column, from, n); + return; + } + + auto casted_column = column->cut(from, n); + casted_column = castColumn({casted_column, column_type, ""}, least_common_type.get()); + data.back()->insertRangeFrom(*casted_column, 0, n); + }; + + size_t pos = 0; + size_t processed_rows = src.num_of_defaults_in_prefix; + + /// Find the first part of the column that intersects the range. 
+ while (pos < src.data.size() && processed_rows + src.data[pos]->size() < start) + { + processed_rows += src.data[pos]->size(); + ++pos; + } + + /// Insert from the first part of column. + if (pos < src.data.size() && processed_rows < start) + { + size_t part_start = start - processed_rows; + size_t part_length = std::min(src.data[pos]->size() - part_start, end - start); + insert_from_part(src.data[pos], part_start, part_length); + processed_rows += src.data[pos]->size(); + ++pos; + } + + /// Insert from the parts of column in the middle of range. + while (pos < src.data.size() && processed_rows + src.data[pos]->size() < end) + { + insert_from_part(src.data[pos], 0, src.data[pos]->size()); + processed_rows += src.data[pos]->size(); + ++pos; + } + + /// Insert from the last part of column if needed. + if (pos < src.data.size() && processed_rows < end) + { + size_t part_end = end - processed_rows; + insert_from_part(src.data[pos], 0, part_end); + } +} + +bool ColumnObjectDeprecated::Subcolumn::isFinalized() const +{ + return num_of_defaults_in_prefix == 0 && + (data.empty() || (data.size() == 1 && !data[0]->isSparse())); +} + +void ColumnObjectDeprecated::Subcolumn::finalize() +{ + if (isFinalized()) + return; + + if (data.size() == 1 && num_of_defaults_in_prefix == 0) + { + data[0] = data[0]->convertToFullColumnIfSparse(); + return; + } + + const auto & to_type = least_common_type.get(); + auto result_column = to_type->createColumn(); + + if (num_of_defaults_in_prefix) + result_column->insertManyDefaults(num_of_defaults_in_prefix); + + for (auto & part : data) + { + part = part->convertToFullColumnIfSparse(); + auto from_type = getDataTypeByColumn(*part); + size_t part_size = part->size(); + + if (!from_type->equals(*to_type)) + { + auto offsets = ColumnUInt64::create(); + auto & offsets_data = offsets->getData(); + + /// We need to convert only non-default values and then recreate column + /// with default value of new type, because default values (which represents misses in data) + /// may be inconsistent between types (e.g "0" in UInt64 and empty string in String). 
+ + part->getIndicesOfNonDefaultRows(offsets_data, 0, part_size); + + if (offsets->size() == part_size) + { + part = castColumn({part, from_type, ""}, to_type); + } + else + { + auto values = part->index(*offsets, offsets->size()); + values = castColumn({values, from_type, ""}, to_type); + part = values->createWithOffsets(offsets_data, *createColumnConstWithDefaultValue(result_column->getPtr()), part_size, /*shift=*/ 0); + } + } + + result_column->insertRangeFrom(*part, 0, part_size); + } + + data = { std::move(result_column) }; + num_of_defaults_in_prefix = 0; +} + +void ColumnObjectDeprecated::Subcolumn::insertDefault() +{ + if (data.empty()) + ++num_of_defaults_in_prefix; + else + data.back()->insertDefault(); + + ++num_rows; +} + +void ColumnObjectDeprecated::Subcolumn::insertManyDefaults(size_t length) +{ + if (data.empty()) + num_of_defaults_in_prefix += length; + else + data.back()->insertManyDefaults(length); + + num_rows += length; +} + +void ColumnObjectDeprecated::Subcolumn::popBack(size_t n) +{ + assert(n <= size()); + + num_rows -= n; + size_t num_removed = 0; + for (auto it = data.rbegin(); it != data.rend(); ++it) + { + if (n == 0) + break; + + auto & column = *it; + if (n < column->size()) + { + column->popBack(n); + n = 0; + } + else + { + ++num_removed; + n -= column->size(); + } + } + + data.resize(data.size() - num_removed); + num_of_defaults_in_prefix -= n; +} + +ColumnObjectDeprecated::Subcolumn ColumnObjectDeprecated::Subcolumn::cut(size_t start, size_t length) const +{ + Subcolumn new_subcolumn(0, is_nullable); + new_subcolumn.insertRangeFrom(*this, start, length); + return new_subcolumn; +} + +Field ColumnObjectDeprecated::Subcolumn::getLastField() const +{ + if (data.empty()) + return Field(); + + const auto & last_part = data.back(); + assert(!last_part->empty()); + return (*last_part)[last_part->size() - 1]; +} + +FieldInfo ColumnObjectDeprecated::Subcolumn::getFieldInfo() const +{ + const auto & base_type = least_common_type.getBase(); + return FieldInfo + { + .scalar_type = base_type, + .have_nulls = base_type->isNullable(), + .need_convert = false, + .num_dimensions = least_common_type.getNumberOfDimensions(), + .need_fold_dimension = false, + }; +} + +ColumnObjectDeprecated::Subcolumn ColumnObjectDeprecated::Subcolumn::recreateWithDefaultValues(const FieldInfo & field_info) const +{ + auto scalar_type = field_info.scalar_type; + if (is_nullable) + scalar_type = makeNullable(scalar_type); + + Subcolumn new_subcolumn(*this); + new_subcolumn.least_common_type = LeastCommonType{createArrayOfType(scalar_type, field_info.num_dimensions)}; + + for (auto & part : new_subcolumn.data) + part = recreateColumnWithDefaultValues(part, scalar_type, field_info.num_dimensions); + + return new_subcolumn; +} + +IColumn & ColumnObjectDeprecated::Subcolumn::getFinalizedColumn() +{ + assert(isFinalized()); + return *data[0]; +} + +const IColumn & ColumnObjectDeprecated::Subcolumn::getFinalizedColumn() const +{ + assert(isFinalized()); + return *data[0]; +} + +const ColumnPtr & ColumnObjectDeprecated::Subcolumn::getFinalizedColumnPtr() const +{ + assert(isFinalized()); + return data[0]; +} + +ColumnObjectDeprecated::Subcolumn::LeastCommonType::LeastCommonType() + : type(std::make_shared()) + , base_type(type) + , num_dimensions(0) +{ +} + +ColumnObjectDeprecated::Subcolumn::LeastCommonType::LeastCommonType(DataTypePtr type_) + : type(std::move(type_)) + , base_type(getBaseTypeOfArray(type)) + , num_dimensions(DB::getNumberOfDimensions(*type)) +{ +} + 
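Each Subcolumn above keeps its values in several parts, adding a new part whenever the least common type widens, and only merges them in finalize(). A minimal sketch of that behaviour, assuming it is compiled inside the ClickHouse source tree (the free function name is illustrative, not part of this file):

    #include <Columns/ColumnObjectDeprecated.h>
    #include <Core/Field.h>

    using namespace DB;

    /// Inserting a UInt64 and then a String widens the least common type to String:
    /// a second column part is created, and finalize() casts every part to that final type.
    void subcolumnLeastCommonTypeSketch()
    {
        ColumnObjectDeprecated::Subcolumn subcolumn(/*size_=*/ 0, /*is_nullable_=*/ false);
        subcolumn.insert(Field(UInt64(42)));       /// first part: integer type
        subcolumn.insert(Field(String("hello")));  /// common type widens, a new part is added
        subcolumn.finalize();                      /// all parts are converted to the common type
        const IColumn & merged = subcolumn.getFinalizedColumn();  /// single column of the common type
        (void)merged;
    }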
+ColumnObjectDeprecated::ColumnObjectDeprecated(bool is_nullable_) + : is_nullable(is_nullable_) + , num_rows(0) +{ +} + +ColumnObjectDeprecated::ColumnObjectDeprecated(Subcolumns && subcolumns_, bool is_nullable_) + : is_nullable(is_nullable_) + , subcolumns(std::move(subcolumns_)) + , num_rows(subcolumns.empty() ? 0 : (*subcolumns.begin())->data.size()) + +{ + checkConsistency(); +} + +void ColumnObjectDeprecated::checkConsistency() const +{ + if (subcolumns.empty()) + return; + + for (const auto & leaf : subcolumns) + { + if (num_rows != leaf->data.size()) + { + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Sizes of subcolumns are inconsistent in ColumnObjectDeprecated." + " Subcolumn '{}' has {} rows, but expected size is {}", + leaf->path.getPath(), leaf->data.size(), num_rows); + } + } +} + +size_t ColumnObjectDeprecated::size() const +{ +#ifndef NDEBUG + checkConsistency(); +#endif + return num_rows; +} + +size_t ColumnObjectDeprecated::byteSize() const +{ + size_t res = 0; + for (const auto & entry : subcolumns) + res += entry->data.byteSize(); + return res; +} + +size_t ColumnObjectDeprecated::allocatedBytes() const +{ + size_t res = 0; + for (const auto & entry : subcolumns) + res += entry->data.allocatedBytes(); + return res; +} + +void ColumnObjectDeprecated::forEachSubcolumn(MutableColumnCallback callback) +{ + for (auto & entry : subcolumns) + for (auto & part : entry->data.data) + callback(part); +} + +void ColumnObjectDeprecated::forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) +{ + for (auto & entry : subcolumns) + { + for (auto & part : entry->data.data) + { + callback(*part); + part->forEachSubcolumnRecursively(callback); + } + } +} + +void ColumnObjectDeprecated::insert(const Field & field) +{ + const auto & object = field.safeGet(); + + HashSet inserted_paths; + size_t old_size = size(); + for (const auto & [key_str, value] : object) + { + PathInData key(key_str); + inserted_paths.insert(key_str); + if (!hasSubcolumn(key)) + addSubcolumn(key, old_size); + + auto & subcolumn = getSubcolumn(key); + subcolumn.insert(value); + } + + for (auto & entry : subcolumns) + { + if (!inserted_paths.has(entry->path.getPath())) + { + bool inserted = tryInsertDefaultFromNested(entry); + if (!inserted) + entry->data.insertDefault(); + } + } + + ++num_rows; +} + +bool ColumnObjectDeprecated::tryInsert(const Field & field) +{ + if (field.getType() != Field::Types::Which::Object) + return false; + + insert(field); + return true; +} + +void ColumnObjectDeprecated::insertDefault() +{ + for (auto & entry : subcolumns) + entry->data.insertDefault(); + + ++num_rows; +} + +Field ColumnObjectDeprecated::operator[](size_t n) const +{ + Field object; + get(n, object); + return object; +} + +void ColumnObjectDeprecated::get(size_t n, Field & res) const +{ + assert(n < size()); + res = Object(); + auto & object = res.safeGet(); + + for (const auto & entry : subcolumns) + { + auto it = object.try_emplace(entry->path.getPath()).first; + entry->data.get(n, it->second); + } +} + +#if !defined(DEBUG_OR_SANITIZER_BUILD) +void ColumnObjectDeprecated::insertFrom(const IColumn & src, size_t n) +#else +void ColumnObjectDeprecated::doInsertFrom(const IColumn & src, size_t n) +#endif +{ + insert(src[n]); +} + +#if !defined(DEBUG_OR_SANITIZER_BUILD) +void ColumnObjectDeprecated::insertRangeFrom(const IColumn & src, size_t start, size_t length) +#else +void ColumnObjectDeprecated::doInsertRangeFrom(const IColumn & src, size_t start, size_t length) +#endif +{ + const auto & 
src_object = assert_cast(src); + + for (const auto & entry : src_object.subcolumns) + { + if (!hasSubcolumn(entry->path)) + { + if (entry->path.hasNested()) + addNestedSubcolumn(entry->path, entry->data.getFieldInfo(), num_rows); + else + addSubcolumn(entry->path, num_rows); + } + + auto & subcolumn = getSubcolumn(entry->path); + subcolumn.insertRangeFrom(entry->data, start, length); + } + + for (auto & entry : subcolumns) + { + if (!src_object.hasSubcolumn(entry->path)) + { + bool inserted = tryInsertManyDefaultsFromNested(entry); + if (!inserted) + entry->data.insertManyDefaults(length); + } + } + + num_rows += length; + finalize(); +} + +void ColumnObjectDeprecated::popBack(size_t length) +{ + for (auto & entry : subcolumns) + entry->data.popBack(length); + + num_rows -= length; +} + +template +MutableColumnPtr ColumnObjectDeprecated::applyForSubcolumns(Func && func) const +{ + if (!isFinalized()) + { + auto finalized = cloneFinalized(); + auto & finalized_object = assert_cast(*finalized); + return finalized_object.applyForSubcolumns(std::forward(func)); + } + + auto res = ColumnObjectDeprecated::create(is_nullable); + for (const auto & subcolumn : subcolumns) + { + auto new_subcolumn = func(subcolumn->data.getFinalizedColumn()); + res->addSubcolumn(subcolumn->path, new_subcolumn->assumeMutable()); + } + + return res; +} + +ColumnPtr ColumnObjectDeprecated::permute(const Permutation & perm, size_t limit) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.permute(perm, limit); }); +} + +ColumnPtr ColumnObjectDeprecated::filter(const Filter & filter, ssize_t result_size_hint) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.filter(filter, result_size_hint); }); +} + +ColumnPtr ColumnObjectDeprecated::index(const IColumn & indexes, size_t limit) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.index(indexes, limit); }); +} + +ColumnPtr ColumnObjectDeprecated::replicate(const Offsets & offsets) const +{ + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.replicate(offsets); }); +} + +MutableColumnPtr ColumnObjectDeprecated::cloneResized(size_t new_size) const +{ + if (new_size == 0) + return ColumnObjectDeprecated::create(is_nullable); + + return applyForSubcolumns([&](const auto & subcolumn) { return subcolumn.cloneResized(new_size); }); +} + +void ColumnObjectDeprecated::getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const +{ + res.resize(num_rows); + iota(res.data(), res.size(), size_t(0)); +} + +void ColumnObjectDeprecated::getExtremes(Field & min, Field & max) const +{ + if (num_rows == 0) + { + min = Object(); + max = Object(); + } + else + { + get(0, min); + get(0, max); + } +} + +const ColumnObjectDeprecated::Subcolumn & ColumnObjectDeprecated::getSubcolumn(const PathInData & key) const +{ + if (const auto * node = subcolumns.findLeaf(key)) + return node->data; + + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObjectDeprecated", key.getPath()); +} + +ColumnObjectDeprecated::Subcolumn & ColumnObjectDeprecated::getSubcolumn(const PathInData & key) +{ + if (const auto * node = subcolumns.findLeaf(key)) + return const_cast(node)->data; + + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in ColumnObjectDeprecated", key.getPath()); +} + +bool ColumnObjectDeprecated::hasSubcolumn(const PathInData & key) const +{ + return subcolumns.findLeaf(key) != nullptr; +} 
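insert(const Field &) above creates a subcolumn the first time a path appears and back-fills defaults for paths missing from a given row, so all subcolumns stay the same length. A minimal sketch of that behaviour, assuming the DB::Object map type from Core/Field.h and a build inside the ClickHouse source tree (the free function name is illustrative):

    #include <Columns/ColumnObjectDeprecated.h>
    #include <Core/Field.h>

    using namespace DB;

    void objectInsertSketch()
    {
        auto column = ColumnObjectDeprecated::create(/*is_nullable_=*/ true);

        Object first;
        first["user.id"] = UInt64(1);          /// first row: creates subcolumn "user.id"
        column->insert(Field(first));

        Object second;
        second["user.name"] = String("x");     /// second row: creates subcolumn "user.name";
        column->insert(Field(second));         /// "user.id" gets a default (NULL) for this row

        column->finalize();                    /// converts every subcolumn to its least common type
        /// column->size() == 2 and both subcolumns now have two rows.
    }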
+ +void ColumnObjectDeprecated::addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn) +{ + size_t new_size = subcolumn->size(); + bool inserted = subcolumns.add(key, Subcolumn(std::move(subcolumn), is_nullable)); + + if (!inserted) + throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +void ColumnObjectDeprecated::addSubcolumn(const PathInData & key, size_t new_size) +{ + bool inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); + if (!inserted) + throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Required size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +void ColumnObjectDeprecated::addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size) +{ + if (!key.hasNested()) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Cannot add Nested subcolumn, because path doesn't contain Nested"); + + bool inserted = false; + /// We find node that represents the same Nested type as @key. + const auto * nested_node = subcolumns.findBestMatch(key); + + if (nested_node) + { + /// Find any leaf of Nested subcolumn. + const auto * leaf = Subcolumns::findLeaf(nested_node, [&](const auto &) { return true; }); + assert(leaf); + + /// Recreate subcolumn with default values and the same sizes of arrays. + auto new_subcolumn = leaf->data.recreateWithDefaultValues(field_info); + + /// It's possible that we have already inserted value from current row + /// to this subcolumn. So, adjust size to expected. + if (new_subcolumn.size() > new_size) + new_subcolumn.popBack(new_subcolumn.size() - new_size); + + assert(new_subcolumn.size() == new_size); + inserted = subcolumns.add(key, new_subcolumn); + } + else + { + /// If node was not found just add subcolumn with empty arrays. + inserted = subcolumns.add(key, Subcolumn(new_size, is_nullable)); + } + + if (!inserted) + throw Exception(ErrorCodes::DUPLICATE_COLUMN, "Subcolumn '{}' already exists", key.getPath()); + + if (num_rows == 0) + num_rows = new_size; + else if (new_size != num_rows) + throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, + "Required size of subcolumn {} ({}) is inconsistent with column size ({})", + key.getPath(), new_size, num_rows); +} + +const ColumnObjectDeprecated::Subcolumns::Node * ColumnObjectDeprecated::getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const +{ + if (!entry->path.hasNested()) + return nullptr; + + size_t old_size = entry->data.size(); + const auto * current_node = subcolumns.findLeaf(entry->path); + const Subcolumns::Node * leaf = nullptr; + + while (current_node) + { + /// Try to find the first Nested up to the current node. + const auto * node_nested = Subcolumns::findParent(current_node, + [](const auto & candidate) { return candidate.isNested(); }); + + if (!node_nested) + break; + + /// Find the leaf with subcolumn that contains values + /// for the last rows. + /// If there are no leaves, skip current node and find + /// the next node up to the current. 
+ leaf = Subcolumns::findLeaf(node_nested, + [&](const auto & candidate) + { + return candidate.data.size() > old_size; + }); + + if (leaf) + break; + + current_node = node_nested->parent; + } + + if (leaf && isNothing(leaf->data.getLeastCommonTypeBase())) + return nullptr; + + return leaf; +} + +bool ColumnObjectDeprecated::tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const +{ + const auto * leaf = getLeafOfTheSameNested(entry); + if (!leaf) + return false; + + size_t old_size = entry->data.size(); + auto field_info = entry->data.getFieldInfo(); + + /// Cut the needed range from the found leaf + /// and replace scalar values to the correct + /// default values for given entry. + auto new_subcolumn = leaf->data + .cut(old_size, leaf->data.size() - old_size) + .recreateWithDefaultValues(field_info); + + entry->data.insertRangeFrom(new_subcolumn, 0, new_subcolumn.size()); + return true; +} + +bool ColumnObjectDeprecated::tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const +{ + const auto * leaf = getLeafOfTheSameNested(entry); + if (!leaf) + return false; + + auto last_field = leaf->data.getLastField(); + if (last_field.isNull()) + return false; + + size_t leaf_num_dimensions = leaf->data.getNumberOfDimensions(); + size_t entry_num_dimensions = entry->data.getNumberOfDimensions(); + + auto default_scalar = entry_num_dimensions > leaf_num_dimensions + ? createEmptyArrayField(entry_num_dimensions - leaf_num_dimensions) + : entry->data.getLeastCommonTypeBase()->getDefault(); + + auto default_field = applyVisitor(FieldVisitorReplaceScalars(default_scalar, leaf_num_dimensions), last_field); + entry->data.insert(std::move(default_field)); + return true; +} + +PathsInData ColumnObjectDeprecated::getKeys() const +{ + PathsInData keys; + keys.reserve(subcolumns.size()); + for (const auto & entry : subcolumns) + keys.emplace_back(entry->path); + return keys; +} + +bool ColumnObjectDeprecated::isFinalized() const +{ + return std::all_of(subcolumns.begin(), subcolumns.end(), + [](const auto & entry) { return entry->data.isFinalized(); }); +} + +void ColumnObjectDeprecated::finalize() +{ + size_t old_size = size(); + Subcolumns new_subcolumns; + for (auto && entry : subcolumns) + { + const auto & least_common_type = entry->data.getLeastCommonType(); + + /// Do not add subcolumns, which consist only from NULLs. + if (isNothing(getBaseTypeOfArray(least_common_type))) + continue; + + entry->data.finalize(); + new_subcolumns.add(entry->path, entry->data); + } + + /// If all subcolumns were skipped add a dummy subcolumn, + /// because Tuple type must have at least one element. + if (new_subcolumns.empty()) + new_subcolumns.add(PathInData{COLUMN_NAME_DUMMY}, Subcolumn{ColumnUInt8::create(old_size, 0), is_nullable}); + + std::swap(subcolumns, new_subcolumns); + checkObjectHasNoAmbiguosPaths(getKeys()); +} + +void ColumnObjectDeprecated::updateHashFast(SipHash & hash) const +{ + for (const auto & entry : subcolumns) + for (auto & part : entry->data.data) + part->updateHashFast(hash); +} + +} diff --git a/src/Columns/ColumnObjectDeprecated.h b/src/Columns/ColumnObjectDeprecated.h new file mode 100644 index 00000000000..29e2d8f0709 --- /dev/null +++ b/src/Columns/ColumnObjectDeprecated.h @@ -0,0 +1,275 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +/// Info that represents a scalar or array field in a decomposed view. 
+/// It allows to recreate field with different number
+/// of dimensions or nullability.
+struct FieldInfo
+{
+    /// The common type of all scalars in field.
+    DataTypePtr scalar_type;
+
+    /// Do we have NULL scalar in field.
+    bool have_nulls;
+
+    /// If true then we have scalars with different types in array and
+    /// we need to convert scalars to the common type.
+    bool need_convert;
+
+    /// Number of dimensions in array. 0 if field is scalar.
+    size_t num_dimensions;
+
+    /// If true then this field is an array whose elements have a variable number of dimensions
+    /// and we need to normalize the dimensions.
+    bool need_fold_dimension;
+};
+
+FieldInfo getFieldInfo(const Field & field);
+
+/** A column that represents an object with a dynamic set of subcolumns.
+ * Subcolumns are identified by paths in the document and are stored in
+ * a trie-like structure. ColumnObjectDeprecated is not suitable for writing into tables
+ * and it should be converted to Tuple with a fixed set of subcolumns before that.
+ */
+class ColumnObjectDeprecated final : public COWHelper, ColumnObjectDeprecated>
+{
+public:
+    /** Class that represents one subcolumn.
+     * It stores values in several parts of the column
+     * and keeps the current common type of all parts.
+     * We add a new column part with a new type when we insert a field
+     * which can't be converted to the current common type.
+     * After insertion of all values the subcolumn should be finalized
+     * for writing and other operations.
+     */
+    class Subcolumn
+    {
+    public:
+        Subcolumn() = default;
+        Subcolumn(size_t size_, bool is_nullable_);
+        Subcolumn(MutableColumnPtr && data_, bool is_nullable_);
+
+        size_t size() const;
+        size_t byteSize() const;
+        size_t allocatedBytes() const;
+        void get(size_t n, Field & res) const;
+
+        bool isFinalized() const;
+        const DataTypePtr & getLeastCommonType() const { return least_common_type.get(); }
+        const DataTypePtr & getLeastCommonTypeBase() const { return least_common_type.getBase(); }
+        size_t getNumberOfDimensions() const { return least_common_type.getNumberOfDimensions(); }
+
+        /// Checks the consistency of column's parts stored in @data.
+        void checkTypes() const;
+
+        /// Inserts a field whose scalars can be arbitrary, but the number of
+        /// dimensions should be consistent with the current common type.
+        void insert(Field field);
+        void insert(Field field, FieldInfo info);
+
+        void insertDefault();
+        void insertManyDefaults(size_t length);
+        void insertRangeFrom(const Subcolumn & src, size_t start, size_t length);
+        void popBack(size_t n);
+
+        Subcolumn cut(size_t start, size_t length) const;
+
+        /// Converts all column's parts to the common type and
+        /// creates a single column that stores all values.
+        void finalize();
+
+        /// Returns the last inserted field.
+        Field getLastField() const;
+
+        FieldInfo getFieldInfo() const;
+
+        /// Recreates subcolumn with default scalar values and keeps sizes of arrays.
+        /// Used to create columns of type Nested with consistent array sizes.
+        Subcolumn recreateWithDefaultValues(const FieldInfo & field_info) const;
+
+        /// Returns the single column if the subcolumn is finalized.
+        /// Otherwise the behaviour is undefined.
+ IColumn & getFinalizedColumn(); + const IColumn & getFinalizedColumn() const; + const ColumnPtr & getFinalizedColumnPtr() const; + + const std::vector & getData() const { return data; } + size_t getNumberOfDefaultsInPrefix() const { return num_of_defaults_in_prefix; } + + friend class ColumnObjectDeprecated; + + private: + class LeastCommonType + { + public: + LeastCommonType(); + explicit LeastCommonType(DataTypePtr type_); + + const DataTypePtr & get() const { return type; } + const DataTypePtr & getBase() const { return base_type; } + size_t getNumberOfDimensions() const { return num_dimensions; } + + private: + DataTypePtr type; + DataTypePtr base_type; + size_t num_dimensions = 0; + }; + + void addNewColumnPart(DataTypePtr type); + + /// Current least common type of all values inserted to this subcolumn. + LeastCommonType least_common_type; + + /// If true then common type type of subcolumn is Nullable + /// and default values are NULLs. + bool is_nullable = false; + + /// Parts of column. Parts should be in increasing order in terms of subtypes/supertypes. + /// That means that the least common type for i-th prefix is the type of i-th part + /// and it's the supertype for all type of column from 0 to i-1. + std::vector data; + + /// Until we insert any non-default field we don't know further + /// least common type and we count number of defaults in prefix, + /// which will be converted to the default type of final common type. + size_t num_of_defaults_in_prefix = 0; + + size_t num_rows = 0; + }; + + using Subcolumns = SubcolumnsTree; + +private: + /// If true then all subcolumns are nullable. + const bool is_nullable; + + Subcolumns subcolumns; + size_t num_rows; + +public: + static constexpr auto COLUMN_NAME_DUMMY = "_dummy"; + + explicit ColumnObjectDeprecated(bool is_nullable_); + ColumnObjectDeprecated(Subcolumns && subcolumns_, bool is_nullable_); + + /// Checks that all subcolumns have consistent sizes. + void checkConsistency() const; + + bool hasSubcolumn(const PathInData & key) const; + + const Subcolumn & getSubcolumn(const PathInData & key) const; + Subcolumn & getSubcolumn(const PathInData & key); + + void incrementNumRows() { ++num_rows; } + + /// Adds a subcolumn from existing IColumn. + void addSubcolumn(const PathInData & key, MutableColumnPtr && subcolumn); + + /// Adds a subcolumn of specific size with default values. + void addSubcolumn(const PathInData & key, size_t new_size); + + /// Adds a subcolumn of type Nested of specific size with default values. + /// It cares about consistency of sizes of Nested arrays. + void addNestedSubcolumn(const PathInData & key, const FieldInfo & field_info, size_t new_size); + + /// Finds a subcolumn from the same Nested type as @entry and inserts + /// an array with default values with consistent sizes as in Nested type. 
+ bool tryInsertDefaultFromNested(const Subcolumns::NodePtr & entry) const; + bool tryInsertManyDefaultsFromNested(const Subcolumns::NodePtr & entry) const; + + const Subcolumns & getSubcolumns() const { return subcolumns; } + Subcolumns & getSubcolumns() { return subcolumns; } + PathsInData getKeys() const; + + /// Part of interface + + const char * getFamilyName() const override { return "Object"; } + TypeIndex getDataType() const override { return TypeIndex::ObjectDeprecated; } + + size_t size() const override; + size_t byteSize() const override; + size_t allocatedBytes() const override; + void forEachSubcolumn(MutableColumnCallback callback) override; + void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override; + void insert(const Field & field) override; + bool tryInsert(const Field & field) override; + void insertDefault() override; +#if !defined(DEBUG_OR_SANITIZER_BUILD) + void insertFrom(const IColumn & src, size_t n) override; + void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; +#else + void doInsertFrom(const IColumn & src, size_t n) override; + void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override; +#endif + void popBack(size_t length) override; + Field operator[](size_t n) const override; + void get(size_t n, Field & res) const override; + + ColumnPtr permute(const Permutation & perm, size_t limit) const override; + ColumnPtr filter(const Filter & filter, ssize_t result_size_hint) const override; + ColumnPtr index(const IColumn & indexes, size_t limit) const override; + ColumnPtr replicate(const Offsets & offsets) const override; + MutableColumnPtr cloneResized(size_t new_size) const override; + + /// Finalizes all subcolumns. + void finalize() override; + bool isFinalized() const override; + + /// Order of rows in ColumnObjectDeprecated is undefined. + void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override; + void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {} +#if !defined(DEBUG_OR_SANITIZER_BUILD) + int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; } +#else + int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; } +#endif + void getExtremes(Field & min, Field & max) const override; + + /// All other methods throw exception. 
+
+    StringRef getDataAt(size_t) const override { throwMustBeConcrete(); }
+    bool isDefaultAt(size_t) const override { throwMustBeConcrete(); }
+    void insertData(const char *, size_t) override { throwMustBeConcrete(); }
+    StringRef serializeValueIntoArena(size_t, Arena &, char const *&) const override { throwMustBeConcrete(); }
+    char * serializeValueIntoMemory(size_t, char *) const override { throwMustBeConcrete(); }
+    const char * deserializeAndInsertFromArena(const char *) override { throwMustBeConcrete(); }
+    const char * skipSerializedInArena(const char *) const override { throwMustBeConcrete(); }
+    void updateHashWithValue(size_t, SipHash &) const override { throwMustBeConcrete(); }
+    WeakHash32 getWeakHash32() const override { throwMustBeConcrete(); }
+    void updateHashFast(SipHash &) const override;
+    void expand(const Filter &, bool) override { throwMustBeConcrete(); }
+    bool hasEqualValues() const override { throwMustBeConcrete(); }
+    size_t byteSizeAt(size_t) const override { throwMustBeConcrete(); }
+    double getRatioOfDefaultRows(double) const override { throwMustBeConcrete(); }
+    UInt64 getNumberOfDefaultRows() const override { throwMustBeConcrete(); }
+    void getIndicesOfNonDefaultRows(Offsets &, size_t, size_t) const override { throwMustBeConcrete(); }
+
+private:
+    [[noreturn]] static void throwMustBeConcrete()
+    {
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ColumnObjectDeprecated must be converted to ColumnTuple before use");
+    }
+
+    template <typename Func>
+    MutableColumnPtr applyForSubcolumns(Func && func) const;
+
+    /// It's used to get the shared sizes of Nested to insert correct default values.
+    const Subcolumns::Node * getLeafOfTheSameNested(const Subcolumns::NodePtr & entry) const;
+};
+}
diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp
index 9ed2c7e3d4d..00cf3bd9c30 100644
--- a/src/Columns/ColumnString.cpp
+++ b/src/Columns/ColumnString.cpp
@@ -557,6 +557,11 @@ void ColumnString::reserve(size_t n)
     offsets.reserve_exact(n);
 }
 
+size_t ColumnString::capacity() const
+{
+    return offsets.capacity();
+}
+
 void ColumnString::prepareForSquashing(const Columns & source_columns)
 {
     size_t new_size = size();
diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h
index 5bbb3ad5151..ec0563b3f00 100644
--- a/src/Columns/ColumnString.h
+++ b/src/Columns/ColumnString.h
@@ -283,6 +283,7 @@ public:
     ColumnPtr compress() const override;
 
     void reserve(size_t n) override;
+    size_t capacity() const override;
     void prepareForSquashing(const Columns & source_columns) override;
     void shrinkToFit() override;
 
diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp
index 1539d7c8c2e..e741eb51c68 100644
--- a/src/Columns/ColumnTuple.cpp
+++ b/src/Columns/ColumnTuple.cpp
@@ -595,6 +595,14 @@ void ColumnTuple::reserve(size_t n)
         getColumn(i).reserve(n);
 }
 
+size_t ColumnTuple::capacity() const
+{
+    if (columns.empty())
+        return size();
+
+    return getColumn(0).capacity();
+}
+
 void ColumnTuple::prepareForSquashing(const Columns & source_columns)
 {
     const size_t tuple_size = columns.size();
diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h
index ef396d6a130..6968294aef9 100644
--- a/src/Columns/ColumnTuple.h
+++ b/src/Columns/ColumnTuple.h
@@ -110,6 +110,7 @@ public:
     void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_ranges) const override;
     void reserve(size_t n)
override; + size_t capacity() const override; void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index d6cb75679be..8a66f4e02ed 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -48,6 +48,8 @@ private: ColumnUnique(const ColumnUnique & other); public: + std::string getName() const override { return "Unique(" + getNestedColumn()->getName() + ")"; } + MutableColumnPtr cloneEmpty() const override; const ColumnPtr & getNestedColumn() const override; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index d4294478633..28a4860b546 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -1277,6 +1277,11 @@ void ColumnVariant::prepareForSquashing(const Columns & source_columns) } } +size_t ColumnVariant::capacity() const +{ + return local_discriminators->capacity(); +} + void ColumnVariant::ensureOwnership() { const size_t num_variants = variants.size(); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 787b2f75247..925eab74af8 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -241,6 +241,7 @@ public: size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + size_t capacity() const override; void prepareForSquashing(const Columns & source_columns) override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index a5e1ee4b462..8f81da86375 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -180,6 +180,11 @@ public: data.reserve_exact(n); } + size_t capacity() const override + { + return data.capacity(); + } + void shrinkToFit() override { data.shrink_to_fit(); diff --git a/src/Columns/IColumn.cpp b/src/Columns/IColumn.cpp index a189903b617..15e29d1422a 100644 --- a/src/Columns/IColumn.cpp +++ b/src/Columns/IColumn.cpp @@ -11,12 +11,13 @@ #include #include #include -#include +#include #include #include #include #include #include +#include #include #include #include @@ -466,12 +467,13 @@ template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; -template class IColumnHelper; +template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; template class IColumnHelper; +template class IColumnHelper; template class IColumnHelper; diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index edcb9f0bc30..e4fe233ffdf 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -475,6 +475,9 @@ public: /// It affects performance only (not correctness). virtual void reserve(size_t /*n*/) {} + /// Returns the number of elements allocated in reserve. + virtual size_t capacity() const { return size(); } + /// Reserve memory before squashing all specified source columns into this column. virtual void prepareForSquashing(const std::vector & source_columns) { diff --git a/src/Columns/IColumnUnique.h b/src/Columns/IColumnUnique.h index a8e10e5e2b2..52b1bef3009 100644 --- a/src/Columns/IColumnUnique.h +++ b/src/Columns/IColumnUnique.h @@ -73,7 +73,7 @@ public: /// Returns dictionary hash which is SipHash is applied to each row of nested column. 
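A short note on the capacity() additions above: IColumn::capacity() defaults to size(), and concrete columns override it to report the allocated size of their primary buffer (offsets for ColumnString, local discriminators for ColumnVariant, the first element for ColumnTuple, the data array for ColumnVector). A hedged sketch of the intended contract follows (the helper function below is hypothetical and not part of the patch); the IColumnUnique hunk then continues right after it.

    #include <Columns/ColumnsNumber.h>
    #include <cassert>

    using namespace DB;

    /// Hypothetical check of the reserve()/size()/capacity() relationship.
    void capacitySketch()
    {
        auto col = ColumnUInt64::create();
        col->reserve(1000);                      /// pre-allocate room for 1000 values
        assert(col->size() == 0);                /// nothing has been inserted yet
        assert(col->capacity() >= 1000);         /// capacity() reports the allocation, not the row count

        for (UInt64 i = 0; i < 10; ++i)
            col->insert(Field(i));
        assert(col->capacity() >= col->size());  /// invariant that callers may rely on
    }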
virtual UInt128 getHash() const = 0; - const char * getFamilyName() const override { return "ColumnUnique"; } + const char * getFamilyName() const override { return "Unique"; } TypeIndex getDataType() const override { return getNestedColumn()->getDataType(); } void insert(const Field &) override diff --git a/src/Columns/tests/gtest_column_dump_structure.cpp b/src/Columns/tests/gtest_column_dump_structure.cpp index e00c77798c8..d9647147157 100644 --- a/src/Columns/tests/gtest_column_dump_structure.cpp +++ b/src/Columns/tests/gtest_column_dump_structure.cpp @@ -10,7 +10,7 @@ TEST(IColumn, dumpStructure) { auto type_lc = std::make_shared(std::make_shared()); ColumnPtr column_lc = type_lc->createColumn(); - String expected_structure = "ColumnLowCardinality(size = 0, UInt8(size = 0), ColumnUnique(size = 1, String(size = 1)))"; + String expected_structure = "LowCardinality(size = 0, UInt8(size = 0), Unique(size = 1, String(size = 1)))"; std::vector threads; for (size_t i = 0; i < 6; ++i) diff --git a/src/Columns/tests/gtest_column_object.cpp b/src/Columns/tests/gtest_column_object.cpp new file mode 100644 index 00000000000..f6a1da64ba3 --- /dev/null +++ b/src/Columns/tests/gtest_column_object.cpp @@ -0,0 +1,351 @@ +#include +#include +#include +#include +#include + +#include +#include + +using namespace DB; + +TEST(ColumnObject, CreateEmpty) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=20, a.b UInt32, a.c Array(String))"); + auto col = type->createColumn(); + const auto & col_object = assert_cast(*col); + const auto & typed_paths = col_object.getTypedPaths(); + ASSERT_TRUE(typed_paths.contains("a.b")); + ASSERT_EQ(typed_paths.at("a.b")->getName(), "UInt32"); + ASSERT_TRUE(typed_paths.contains("a.c")); + ASSERT_EQ(typed_paths.at("a.c")->getName(), "Array(String)"); + ASSERT_TRUE(col_object.getDynamicPaths().empty()); + ASSERT_TRUE(col_object.getSharedDataOffsets().empty()); + ASSERT_TRUE(col_object.getSharedDataPathsAndValues().first->empty()); + ASSERT_TRUE(col_object.getSharedDataPathsAndValues().second->empty()); + ASSERT_EQ(col_object.getMaxDynamicTypes(), 10); + ASSERT_EQ(col_object.getMaxDynamicPaths(), 20); +} + +TEST(ColumnObject, GetName) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=20, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + ASSERT_EQ(col->getName(), "Object(max_dynamic_paths=20, max_dynamic_types=10, a.b Array(String), b.d UInt32)"); +} + +Field deserializeFieldFromSharedData(ColumnString * values, size_t n) +{ + auto data = values->getDataAt(n); + ReadBufferFromMemory buf(data.data, data.size); + Field res; + std::make_shared()->deserializeBinary(res, buf, FormatSettings()); + return res; +} + +TEST(ColumnObject, InsertField) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + Object empty_object; + col_object.insert(empty_object); + ASSERT_EQ(col_object[0], (Object{{"a.b", Array{}}, {"b.d", Field(0u)}})); + 
ASSERT_EQ(typed_paths.at("a.b")->size(), 1); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(0)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 1); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(0)); + ASSERT_TRUE(dynamic_paths.empty()); + ASSERT_EQ(shared_data_nested_column.size(), 1); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(0)); + + Object object1 = {{"a.b", Array{String("Hello"), String("World")}}, {"a.c", Field(42)}}; + col_object.insert(object1); + ASSERT_EQ(col_object[1], (Object{{"a.b", Array{String("Hello"), String("World")}}, {"b.d", Field(0u)}, {"a.c", Field(42)}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 2); + ASSERT_EQ((*typed_paths.at("a.b"))[1], (Array{String("Hello"), String("World")})); + ASSERT_EQ(typed_paths.at("b.d")->size(), 2); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(1)); + ASSERT_EQ(dynamic_paths.size(), 1); + ASSERT_TRUE(dynamic_paths.contains("a.c")); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 2); + ASSERT_TRUE(dynamic_paths.at("a.c")->isDefaultAt(0)); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field(42)); + ASSERT_EQ(shared_data_nested_column.size(), 2); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + + Object object2 = {{"b.d", Field(142u)}, {"a.c", Field(43)}, {"a.d", Field("str")}, {"a.e", Field(242)}, {"a.f", Array{Field(42), Field(43)}}}; + col_object.insert(object2); + ASSERT_EQ(col_object[2], (Object{{"a.b", Array{}}, {"b.d", Field(142u)}, {"a.c", Field(43)}, {"a.d", Field("str")}, {"a.e", Field(242)}, {"a.f", Array{Field(42), Field(43)}}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 3); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(2)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 3); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(142u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_TRUE(dynamic_paths.contains("a.c")); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 3); + ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(43)); + ASSERT_TRUE(dynamic_paths.contains("a.d")); + ASSERT_EQ(dynamic_paths.at("a.d")->size(), 3); + ASSERT_EQ((*dynamic_paths.at("a.d"))[2], Field("str")); + + ASSERT_EQ(shared_data_nested_column.size(), 3); + ASSERT_EQ(shared_data_offsets[2] - shared_data_offsets[1], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field(242)); + ASSERT_EQ((*shared_data_paths)[1], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), (Array({Field(42), Field(43)}))); + + Object object3 = {{"b.a", Field("Str")}, {"b.b", Field(2)}, {"b.c", Field(Tuple{Field(42), Field("Str")})}}; + col_object.insert(object3); + ASSERT_EQ(col_object[3], (Object{{"a.b", Array{}}, {"b.d", Field(0u)}, {"b.a", Field("Str")}, {"b.b", Field(2)}, {"b.c", Field(Tuple{Field(42), Field("Str")})}})); + ASSERT_EQ(typed_paths.at("a.b")->size(), 4); + ASSERT_TRUE(typed_paths.at("a.b")->isDefaultAt(3)); + ASSERT_EQ(typed_paths.at("b.d")->size(), 4); + ASSERT_TRUE(typed_paths.at("b.d")->isDefaultAt(3)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ(dynamic_paths.at("a.c")->size(), 4); + ASSERT_TRUE(dynamic_paths.at("a.c")->isDefaultAt(3)); + ASSERT_EQ(dynamic_paths.at("a.d")->size(), 4); + ASSERT_TRUE(dynamic_paths.at("a.d")->isDefaultAt(3)); + + ASSERT_EQ(shared_data_nested_column.size(), 4); + ASSERT_EQ(shared_data_offsets[3] - shared_data_offsets[2], 3); + ASSERT_EQ((*shared_data_paths)[2], "b.a"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str")); + ASSERT_EQ((*shared_data_paths)[3], "b.b"); + 
ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field(2)); + ASSERT_EQ((*shared_data_paths)[4], "b.c"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field(Tuple{Field(42), Field("Str")})); + + Object object4 = {{"c.c", Field(Null())}, {"c.d", Field(Null())}}; + col_object.insert(object4); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(4)); +} + +TEST(ColumnObject, InsertFrom) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.a", Field(42)}}); + + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + + auto src_col1 = type->createColumn(); + auto & src_col_object1 = assert_cast(*src_col1); + src_col_object1.insert(Object{{"b.d", Field(43u)}, {"a.c", Field("Str1")}}); + col_object.insertFrom(src_col_object1, 0); + ASSERT_EQ((*typed_paths.at("a.b"))[1], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[1], Field(43u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[1], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field("Str1")); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + + auto src_col2 = type->createColumn(); + auto & src_col_object2 = assert_cast(*src_col2); + src_col_object2.insert(Object{{"a.b", Array{"Str4", "Str5"}}, {"b.d", Field(44u)}, {"a.d", Field("Str2")}, {"a.e", Field("Str3")}}); + col_object.insertFrom(src_col_object2, 0); + ASSERT_EQ((*typed_paths.at("a.b"))[2], Field(Array{"Str4", "Str5"})); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(44u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[2], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(Null())); + ASSERT_EQ(shared_data_offsets[2] - shared_data_offsets[1], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field("Str2")); + ASSERT_EQ((*shared_data_paths)[1], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), Field("Str3")); + + auto src_col3 = type->createColumn(); + auto & src_col_object3 = assert_cast(*src_col3); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}}); + src_col_object3.insert(Object{{"a.a", Field("Str10")}, {"a.c", Field(45u)}, {"a.h", Field("Str6")}, {"h.h", Field("Str7")}, {"a.f", Field("Str8")}, {"a.g", Field("Str9")}, {"a.i", Field("Str11")}, {"a.u", Field(Null())}}); + col_object.insertFrom(src_col_object3, 1); + ASSERT_EQ((*typed_paths.at("a.b"))[3], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[3], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[3], Field("Str10")); + ASSERT_EQ((*dynamic_paths.at("a.c"))[3], Field(45u)); + ASSERT_EQ(shared_data_offsets[3] - shared_data_offsets[2], 5); + ASSERT_EQ((*shared_data_paths)[2], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str8")); + ASSERT_EQ((*shared_data_paths)[3], "a.g"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field("Str9")); + ASSERT_EQ((*shared_data_paths)[4], "a.h"); + 
ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[5], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 5), Field("Str11")); + ASSERT_EQ((*shared_data_paths)[6], "h.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 6), Field("Str7")); +} + + +TEST(ColumnObject, InsertRangeFrom) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.a", Field(42)}}); + + const auto & typed_paths = col_object.getTypedPaths(); + const auto & dynamic_paths = col_object.getDynamicPaths(); + const auto & shared_data_nested_column = col_object.getSharedDataNestedColumn(); + const auto & shared_data_offsets = col_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = col_object.getSharedDataPathsAndValues(); + + auto src_col1 = type->createColumn(); + auto & src_col_object1 = assert_cast(*src_col1); + src_col_object1.insert(Object{{"b.d", Field(43u)}, {"a.c", Field("Str1")}}); + src_col_object1.insert(Object{{"a.b", Field(Array{"Str1", "Str2"})}, {"a.a", Field("Str1")}}); + src_col_object1.insert(Object{{"b.d", Field(45u)}, {"a.c", Field("Str2")}}); + col_object.insertRangeFrom(src_col_object1, 0, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[1], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[2], Field(Array{"Str1", "Str2"})); + ASSERT_EQ((*typed_paths.at("a.b"))[3], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[1], Field(43u)); + ASSERT_EQ((*typed_paths.at("b.d"))[2], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[3], Field(45u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[1], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[2], Field("Str1")); + ASSERT_EQ((*dynamic_paths.at("a.a"))[3], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[1], Field("Str1")); + ASSERT_EQ((*dynamic_paths.at("a.c"))[2], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[3], Field("Str2")); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(1)); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(2)); + ASSERT_TRUE(shared_data_nested_column.isDefaultAt(3)); + + auto src_col2 = type->createColumn(); + auto & src_col_object2 = assert_cast(*src_col2); + src_col_object2.insert(Object{{"a.b", Array{"Str4", "Str5"}}, {"a.d", Field("Str2")}, {"a.e", Field("Str3")}}); + src_col_object2.insert(Object{{"b.d", Field(44u)}, {"a.d", Field("Str22")}, {"a.e", Field("Str33")}}); + src_col_object2.insert(Object{{"a.b", Array{"Str44", "Str55"}}, {"a.d", Field("Str222")}, {"a.e", Field("Str333")}}); + col_object.insertRangeFrom(src_col_object2, 0, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[4], Field(Array{"Str4", "Str5"})); + ASSERT_EQ((*typed_paths.at("a.b"))[5], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[6], Field(Array{"Str44", "Str55"})); + ASSERT_EQ((*typed_paths.at("b.d"))[4], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[5], Field(44u)); + ASSERT_EQ((*typed_paths.at("b.d"))[6], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[4], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[5], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[6], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[4], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[5], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[6], 
Field(Null())); + ASSERT_EQ(shared_data_offsets[4] - shared_data_offsets[3], 2); + ASSERT_EQ((*shared_data_paths)[0], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 0), Field("Str2")); + ASSERT_EQ((*shared_data_paths)[1], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 1), Field("Str3")); + ASSERT_EQ(shared_data_offsets[5] - shared_data_offsets[4], 2); + ASSERT_EQ((*shared_data_paths)[2], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 2), Field("Str22")); + ASSERT_EQ((*shared_data_paths)[3], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 3), Field("Str33")); + ASSERT_EQ(shared_data_offsets[6] - shared_data_offsets[5], 2); + ASSERT_EQ((*shared_data_paths)[4], "a.d"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 4), Field("Str222")); + ASSERT_EQ((*shared_data_paths)[5], "a.e"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 5), Field("Str333")); + + auto src_col3 = type->createColumn(); + auto & src_col_object3 = assert_cast(*src_col3); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}}); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"h.h", Field("Str7")}, {"a.f", Field("Str8")}, {"a.g", Field("Str9")}, {"a.i", Field("Str11")}}); + src_col_object3.insert(Object{{"a.a", Field("Str10")}}); + src_col_object3.insert(Object{{"a.h", Field("Str6")}, {"a.c", Field(45u)}, {"h.h", Field("Str7")}, {"a.i", Field("Str11")}}); + col_object.insertRangeFrom(src_col_object3, 1, 3); + ASSERT_EQ((*typed_paths.at("a.b"))[7], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[8], Field(Array{})); + ASSERT_EQ((*typed_paths.at("a.b"))[9], Field(Array{})); + ASSERT_EQ((*typed_paths.at("b.d"))[7], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[8], Field(0u)); + ASSERT_EQ((*typed_paths.at("b.d"))[9], Field(0u)); + ASSERT_EQ(dynamic_paths.size(), 2); + ASSERT_EQ((*dynamic_paths.at("a.a"))[7], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.a"))[8], Field("Str10")); + ASSERT_EQ((*dynamic_paths.at("a.a"))[9], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[7], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[8], Field(Null())); + ASSERT_EQ((*dynamic_paths.at("a.c"))[9], Field(45u)); + ASSERT_EQ(shared_data_offsets[7] - shared_data_offsets[6], 5); + ASSERT_EQ((*shared_data_paths)[6], "a.f"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 6), Field("Str8")); + ASSERT_EQ((*shared_data_paths)[7], "a.g"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 7), Field("Str9")); + ASSERT_EQ((*shared_data_paths)[8], "a.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 8), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[9], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 9), Field("Str11")); + ASSERT_EQ((*shared_data_paths)[10], "h.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 10), Field("Str7")); + ASSERT_EQ(shared_data_offsets[8] - shared_data_offsets[7], 0); + ASSERT_EQ(shared_data_offsets[9] - shared_data_offsets[8], 3); + ASSERT_EQ((*shared_data_paths)[11], "a.h"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 11), Field("Str6")); + ASSERT_EQ((*shared_data_paths)[12], "a.i"); + ASSERT_EQ(deserializeFieldFromSharedData(shared_data_values, 12), Field("Str11")); +} + +TEST(ColumnObject, SerializeDeserializerFromArena) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b 
Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", Field(47)}}); + col_object.insert(Object{{"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}}); + col_object.insert(Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}}); + + Arena arena; + const char * pos = nullptr; + auto ref1 = col_object.serializeValueIntoArena(0, arena, pos); + col_object.serializeValueIntoArena(1, arena, pos); + col_object.serializeValueIntoArena(2, arena, pos); + + auto col2 = type->createColumn(); + auto & col_object2 = assert_cast(*col); + pos = col_object2.deserializeAndInsertFromArena(ref1.data); + pos = col_object2.deserializeAndInsertFromArena(pos); + col_object2.deserializeAndInsertFromArena(pos); + + ASSERT_EQ(col_object2[0], (Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", Field(47)}})); + ASSERT_EQ(col_object2[1], (Object{{"b.d", Field{0u}}, {"a.b", Array{}}, {"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}})); + ASSERT_EQ(col_object2[2], (Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}})); +} + +TEST(ColumnObject, SkipSerializedInArena) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, b.d UInt32, a.b Array(String))"); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"b.d", Field(42u)}, {"a.b", Array{"Str1", "Str2"}}, {"a.a", Tuple{"Str3", 441u}}, {"a.c", Field("Str4")}, {"a.d", Array{Field(45), Field(46)}}, {"a.e", Field(47)}}); + col_object.insert(Object{{"b.a", Field(48)}, {"b.b", Array{Field(49), Field(50)}}}); + col_object.insert(Object{{"b.d", Field(442u)}, {"a.b", Array{"Str11", "Str22"}}, {"a.a", Tuple{"Str33", 444u}}, {"a.c", Field("Str44")}, {"a.d", Array{Field(445), Field(446)}}, {"a.e", Field(447)}}); + + Arena arena; + const char * pos = nullptr; + auto ref1 = col_object.serializeValueIntoArena(0, arena, pos); + col_object.serializeValueIntoArena(1, arena, pos); + auto ref3 = col_object.serializeValueIntoArena(2, arena, pos); + + const char * end = ref3.data + ref3.size; + auto col2 = type->createColumn(); + pos = col2->skipSerializedInArena(ref1.data); + pos = col2->skipSerializedInArena(pos); + pos = col2->skipSerializedInArena(pos); + ASSERT_EQ(pos, end); +} diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 56e7c4f3405..9b6a7428411 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -1,18 +1,24 @@ -#include #include -#include -#include -#include -#include -#include -#include + #include #include +#include +#include #include #include #include #include +#include +#include +#include +#include +#include +#include + +#include + #include +#include #include "config.h" @@ -52,6 +58,12 @@ static std::unique_ptr openFileIfExists(const std::stri return {}; } +static void openCgroupv2MetricFile(const std::string & filename, std::optional & out) +{ + if (auto path = getCgroupsV2PathContainingFile(filename)) + 
openFileIfExists((path.value() + filename).c_str(), out); +}; + #endif @@ -63,21 +75,15 @@ AsynchronousMetrics::AsynchronousMetrics( , protocol_server_metrics_func(protocol_server_metrics_func_) { #if defined(OS_LINUX) - openFileIfExists("/proc/meminfo", meminfo); - openFileIfExists("/proc/loadavg", loadavg); - openFileIfExists("/proc/stat", proc_stat); openFileIfExists("/proc/cpuinfo", cpuinfo); openFileIfExists("/proc/sys/fs/file-nr", file_nr); - openFileIfExists("/proc/uptime", uptime); openFileIfExists("/proc/net/dev", net_dev); /// CGroups v2 - openFileIfExists("/sys/fs/cgroup/memory.max", cgroupmem_limit_in_bytes); - if (cgroupmem_limit_in_bytes) - { - openFileIfExists("/sys/fs/cgroup/memory.current", cgroupmem_usage_in_bytes); - } - openFileIfExists("/sys/fs/cgroup/cpu.max", cgroupcpu_max); + openCgroupv2MetricFile("memory.max", cgroupmem_limit_in_bytes); + openCgroupv2MetricFile("memory.current", cgroupmem_usage_in_bytes); + openCgroupv2MetricFile("cpu.max", cgroupcpu_max); + openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat); /// CGroups v1 if (!cgroupmem_limit_in_bytes) @@ -90,6 +96,21 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_period_us", cgroupcpu_cfs_period); openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroupcpu_cfs_quota); } + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); + + if (!cgroupcpu_stat && !cgroupcpuacct_stat) + { + /// The following metrics are not cgroup-aware and we've found cgroup-specific metric files for the similar metrics, + /// so we're better not reporting them at all to avoid confusion + openFileIfExists("/proc/loadavg", loadavg); + openFileIfExists("/proc/stat", proc_stat); + openFileIfExists("/proc/uptime", uptime); + } + + /// The same story for memory metrics + if (!cgroupmem_limit_in_bytes) + openFileIfExists("/proc/meminfo", meminfo); openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); openFileIfExists("/proc/self/maps", vm_maps); @@ -570,6 +591,151 @@ AsynchronousMetrics::NetworkInterfaceStatValues::operator-(const AsynchronousMet #endif +#if defined(OS_LINUX) +void AsynchronousMetrics::applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier) +{ + new_values["OSUserTime" + cpu_suffix] + = {delta_values.user * multiplier, + "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the " + "host machine, not just clickhouse-server." + " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSNiceTime" + cpu_suffix] + = {delta_values.nice * multiplier, + "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all " + "the processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSystemTime" + cpu_suffix] + = {delta_values.system * multiplier, + "The ratio of time the CPU core was running OS kernel (system) code. 
This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIdleTime" + cpu_suffix] + = {delta_values.idle * multiplier, + "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This " + "is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIOWaitTime" + cpu_suffix] + = {delta_values.iowait * multiplier, + "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as " + "the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just " + "clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIrqTime" + cpu_suffix] + = {delta_values.irq * multiplier, + "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate hardware misconfiguration or a very high network load." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSoftIrqTime" + cpu_suffix] + = {delta_values.softirq * multiplier, + "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate inefficient software running on the system." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSStealTime" + cpu_suffix] + = {delta_values.steal * multiplier, + "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide " + "metric, it includes all the processes on the host machine, not just clickhouse-server." + " Not every virtualized environments present this metric, and most of them don't." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestTime" + cpu_suffix] + = {delta_values.guest * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man " + "procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestNiceTime" + cpu_suffix] + = {delta_values.guest_nice * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest " + "was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host " + "machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; +} + +void AsynchronousMetrics::applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, double num_cpus_to_normalize, const ProcStatValuesCPU & delta_values_all_cpus, double multiplier) +{ + chassert(num_cpus_to_normalize); + + new_values["OSUserTimeNormalized"] + = {delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, + "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSNiceTimeNormalized"] + = {delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSystemTimeNormalized"] + = {delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIdleTimeNormalized"] + = {delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIOWaitTimeNormalized"] + = {delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." 
+ " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIrqTimeNormalized"] + = {delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of " + "the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSoftIrqTimeNormalized"] + = {delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSStealTimeNormalized"] + = {delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, + "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestTimeNormalized"] + = {delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestNiceTimeNormalized"] + = {delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; +} +#endif + void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { Stopwatch watch; @@ -831,7 +997,68 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."}; } - if (proc_stat) + if (cgroupcpu_stat || cgroupcpuacct_stat) + { + try + { + ReadBufferFromFilePRead & in = cgroupcpu_stat ? 
*cgroupcpu_stat : *cgroupcpuacct_stat; + ProcStatValuesCPU current_values{}; + + /// We re-read the file from the beginning each time + in.rewind(); + + while (!in.eof()) + { + String name; + readStringUntilWhitespace(name, in); + skipWhitespaceIfAny(in); + + /// `user_usec` for cgroup v2 and `user` for cgroup v1 + if (name.starts_with("user")) + { + readText(current_values.user, in); + skipToNextLineOrEOF(in); + } + /// `system_usec` for cgroup v2 and `system` for cgroup v1 + else if (name.starts_with("system")) + { + readText(current_values.system, in); + skipToNextLineOrEOF(in); + } + else + skipToNextLineOrEOF(in); + } + + if (!first_run) + { + auto get_clock_ticks = [&]() + { + if (auto hz = sysconf(_SC_CLK_TCK); hz != -1) + return hz; + else + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); + }; + const auto cgroup_version_specific_divisor = cgroupcpu_stat ? 1e6 : get_clock_ticks(); + const double multiplier = 1.0 / cgroup_version_specific_divisor + / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); + + const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; + applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); + if (max_cpu_cgroups > 0) + applyNormalizedCPUMetricsUpdate(new_values, max_cpu_cgroups, delta_values, multiplier); + } + + proc_stat_values_all_cpus = current_values; + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat); + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); + } + } + else if (proc_stat) { try { @@ -886,43 +1113,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) else delta_values_all_cpus = delta_values; - new_values["OSUserTime" + cpu_suffix] = { delta_values.user * multiplier, - "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSNiceTime" + cpu_suffix] = { delta_values.nice * multiplier, - "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSystemTime" + cpu_suffix] = { delta_values.system * multiplier, - "The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIdleTime" + cpu_suffix] = { delta_values.idle * multiplier, - "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." 
- " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIOWaitTime" + cpu_suffix] = { delta_values.iowait * multiplier, - "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIrqTime" + cpu_suffix] = { delta_values.irq * multiplier, - "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate hardware misconfiguration or a very high network load." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSoftIrqTime" + cpu_suffix] = { delta_values.softirq * multiplier, - "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate inefficient software running on the system." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSStealTime" + cpu_suffix] = { delta_values.steal * multiplier, - "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " Not every virtualized environments present this metric, and most of them don't." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestTime" + cpu_suffix] = { delta_values.guest * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestNiceTime" + cpu_suffix] = { delta_values.guest_nice * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across them [0..num cores]."}; + applyCPUMetricsUpdate(new_values, cpu_suffix, delta_values, multiplier); } prev_values = current_values; @@ -978,38 +1169,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) Float64 num_cpus_to_normalize = max_cpu_cgroups > 0 ? max_cpu_cgroups : num_cpus; if (num_cpus_to_normalize > 0) - { - new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, - "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
- " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, - "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - } + applyNormalizedCPUMetricsUpdate(new_values, num_cpus_to_normalize, delta_values_all_cpus, multiplier); } proc_stat_values_other = current_other_values; @@ -1042,8 +1202,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) tryLogCurrentException(__PRETTY_FUNCTION__); } } - - if (meminfo) + else if (meminfo) { try { diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 04d0319e35b..78d07ef4b6c 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -126,6 +126,8 @@ private: std::optional cgroupcpu_cfs_period TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_cfs_quota TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_max TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpu_stat TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpuacct_stat TSA_GUARDED_BY(data_mutex); std::optional vm_max_map_count TSA_GUARDED_BY(data_mutex); std::optional vm_maps TSA_GUARDED_BY(data_mutex); @@ -221,6 +223,16 @@ private: void openBlockDevices(); void openSensorsChips(); void openEDAC(); + + void applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier); + + void applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, + double num_cpus_to_normalize, + const ProcStatValuesCPU & delta_values_all_cpus, + double multiplier); + #endif void run(); diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp index ef8bdfc1823..83b04360164 100644 --- a/src/Common/CgroupsMemoryUsageObserver.cpp +++ b/src/Common/CgroupsMemoryUsageObserver.cpp @@ -144,31 +144,6 @@ private: /// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such /// systems existed only for a short transition period. 
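Before the CgroupsMemoryUsageObserver changes below (where the dedicated getCgroupsV2Path() helper is removed in favour of the shared getCgroupsV2PathContainingFile()), a brief note on the arithmetic in the cgroup CPU branch above: cgroup v2 cpu.stat reports user_usec/system_usec in microseconds, so the divisor is 1e6, while cgroup v1 cpuacct.stat reports clock ticks, so the divisor is sysconf(_SC_CLK_TCK). A hedged sketch with made-up numbers, not part of the patch:

    #include <unistd.h>
    #include <chrono>

    /// Illustrative only: how the per-second CPU ratio is derived from the
    /// cgroup counters read in AsynchronousMetrics::update().
    double cpuRatioSketch(bool is_cgroup_v2, double delta_counter, std::chrono::nanoseconds time_since_previous_update)
    {
        const double divisor = is_cgroup_v2 ? 1e6 : static_cast<double>(sysconf(_SC_CLK_TCK));
        const double elapsed_seconds = static_cast<double>(time_since_previous_update.count()) / 1e9;
        const double multiplier = 1.0 / divisor / elapsed_seconds;
        /// Example: on cgroup v2, a user_usec delta of 500000 over a 1 second interval
        /// gives 0.5, i.e. the equivalent of half a CPU core spent in userspace code.
        return delta_counter * multiplier;
    }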
-std::optional getCgroupsV2Path() -{ - if (!cgroupsV2Enabled()) - return {}; - - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - - fs::path current_cgroup = cgroupV2PathOfProcess(); - if (current_cgroup.empty()) - return {}; - - /// Return the bottom-most nested current memory file. If there is no such file at the current - /// level, try again at the parent level as memory settings are inherited. - while (current_cgroup != default_cgroups_mount.parent_path()) - { - const auto current_path = current_cgroup / "memory.current"; - const auto stat_path = current_cgroup / "memory.stat"; - if (fs::exists(current_path) && fs::exists(stat_path)) - return {current_cgroup}; - current_cgroup = current_cgroup.parent_path(); - } - return {}; -} - std::optional getCgroupsV1Path() { auto path = default_cgroups_mount / "memory/memory.stat"; @@ -179,7 +154,7 @@ std::optional getCgroupsV1Path() std::pair getCgroupsPath() { - auto v2_path = getCgroupsV2Path(); + auto v2_path = getCgroupsV2PathContainingFile("memory.current"); if (v2_path.has_value()) return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2}; diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index b6dd14d292c..67890568941 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -307,7 +307,7 @@ M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \ M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \ \ - M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ + M(DiskS3NoSuchKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ #ifdef APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 4b577a251af..68a8fa7d74c 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "Common/MultiVersion.h" #include #include "DNSPTRResolverProvider.h" @@ -139,12 +140,6 @@ DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host) return addresses; } -DNSResolver::IPAddresses resolveIPAddressWithCache(CacheBase & cache, const std::string & host) -{ - auto [result, _ ] = cache.getOrSet(host, [&host]() {return std::make_shared(resolveIPAddressImpl(host), std::chrono::system_clock::now());}); - return result->addresses; -} - std::unordered_set reverseResolveImpl(const Poco::Net::IPAddress & address) { auto ptr_resolver = DB::DNSPTRResolverProvider::get(); @@ -198,21 +193,89 @@ struct DNSResolver::Impl std::atomic disable_cache{false}; }; +struct DNSResolver::AddressFilter +{ + struct DNSFilterSettings + { + bool dns_allow_resolve_names_to_ipv4{true}; + bool dns_allow_resolve_names_to_ipv6{true}; + }; -DNSResolver::DNSResolver() : impl(std::make_unique()), log(getLogger("DNSResolver")) {} + AddressFilter() : settings(std::make_unique()) {} + + void performAddressFiltering(DNSResolver::IPAddresses & addresses) const + { + const auto current_settings = settings.get(); + bool dns_resolve_ipv4 = current_settings->dns_allow_resolve_names_to_ipv4; + bool dns_resolve_ipv6 = current_settings->dns_allow_resolve_names_to_ipv6; + + if (dns_resolve_ipv4 && dns_resolve_ipv6) + { + return; + } + if (!dns_resolve_ipv4 && 
!dns_resolve_ipv6) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "DNS can't resolve any address, because dns_resolve_ipv6_interfaces and dns_resolve_ipv4_interfaces both are disabled"); + } + + std::erase_if(addresses, [dns_resolve_ipv6, dns_resolve_ipv4](const Poco::Net::IPAddress& address) + { + return (address.family() == Poco::Net::IPAddress::IPv6 && !dns_resolve_ipv6) + || (address.family() == Poco::Net::IPAddress::IPv4 && !dns_resolve_ipv4); + }); + } + + void setSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6) + { + settings.set(std::make_unique(dns_allow_resolve_names_to_ipv4, dns_allow_resolve_names_to_ipv6)); + } + + MultiVersion settings; +}; + + +DNSResolver::DNSResolver() + : impl(std::make_unique()) + , addressFilter(std::make_unique()) + , log(getLogger("DNSResolver")) {} + + +DNSResolver::IPAddresses DNSResolver::getResolvedIPAdressessWithFiltering(const std::string & host) +{ + auto addresses = resolveIPAddressImpl(host); + addressFilter->performAddressFiltering(addresses); + + if (addresses.empty()) + { + ProfileEvents::increment(ProfileEvents::DNSError); + throw DB::NetException(ErrorCodes::DNS_ERROR, "After filtering there are no resolved address for host({}).", host); + } + return addresses; +} + +DNSResolver::IPAddresses DNSResolver::resolveIPAddressWithCache(const std::string & host) +{ + auto [result, _ ] = impl->cache_host.getOrSet(host, [&host, this]() {return std::make_shared(getResolvedIPAdressessWithFiltering(host), std::chrono::system_clock::now());}); + return result->addresses; +} Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host) { return pickAddress(resolveHostAll(host)); // random order -> random pick } +void DNSResolver::setFilterSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6) +{ + addressFilter->setSettings(dns_allow_resolve_names_to_ipv4, dns_allow_resolve_names_to_ipv6); +} + DNSResolver::IPAddresses DNSResolver::resolveHostAllInOriginOrder(const std::string & host) { if (impl->disable_cache) - return resolveIPAddressImpl(host); + return getResolvedIPAdressessWithFiltering(host); addToNewHosts(host); - return resolveIPAddressWithCache(impl->cache_host, host); + return resolveIPAddressWithCache(host); } DNSResolver::IPAddresses DNSResolver::resolveHostAll(const std::string & host) @@ -232,7 +295,7 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host_an splitHostAndPort(host_and_port, host, port); addToNewHosts(host); - return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(impl->cache_host, host)), port); + return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(host)), port); } Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, UInt16 port) @@ -241,7 +304,7 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, U return Poco::Net::SocketAddress(host, port); addToNewHosts(host); - return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(impl->cache_host, host)), port); + return Poco::Net::SocketAddress(pickAddress(resolveIPAddressWithCache(host)), port); } std::vector DNSResolver::resolveAddressList(const std::string & host, UInt16 port) @@ -254,7 +317,7 @@ std::vector DNSResolver::resolveAddressList(const std: if (!impl->disable_cache) addToNewHosts(host); - std::vector ips = impl->disable_cache ? hostByName(host) : resolveIPAddressWithCache(impl->cache_host, host); + std::vector ips = impl->disable_cache ? 
hostByName(host) : resolveIPAddressWithCache(host); auto ips_end = std::unique(ips.begin(), ips.end()); addresses.reserve(ips_end - ips.begin()); @@ -419,8 +482,8 @@ bool DNSResolver::updateCache(UInt32 max_consecutive_failures) bool DNSResolver::updateHost(const String & host) { - const auto old_value = resolveIPAddressWithCache(impl->cache_host, host); - auto new_value = resolveIPAddressImpl(host); + const auto old_value = resolveIPAddressWithCache(host); + auto new_value = getResolvedIPAdressessWithFiltering(host); const bool result = old_value != new_value; impl->cache_host.set(host, std::make_shared(std::move(new_value), std::chrono::system_clock::now())); return result; diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index 1ddd9d3b991..b35f55dfcd2 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -68,6 +68,8 @@ public: /// Returns true if IP of any host has been changed or an element was dropped (too many failures) bool updateCache(UInt32 max_consecutive_failures); + void setFilterSettings(bool dns_allow_resolve_names_to_ipv4, bool dns_allow_resolve_names_to_ipv6); + /// Returns a copy of cache entries std::vector> cacheEntries() const; @@ -86,6 +88,10 @@ private: struct Impl; std::unique_ptr impl; + + struct AddressFilter; + std::unique_ptr addressFilter; + LoggerPtr log; /// Updates cached value and returns true it has been changed. @@ -94,6 +100,9 @@ private: void addToNewHosts(const String & host); void addToNewAddresses(const Poco::Net::IPAddress & address); + + IPAddresses resolveIPAddressWithCache(const std::string & host); + IPAddresses getResolvedIPAdressessWithFiltering(const std::string & host); }; } diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 44a1cd071cb..1055b3d34db 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -608,6 +608,7 @@ M(727, UNEXPECTED_TABLE_ENGINE) \ M(728, UNEXPECTED_DATA_TYPE) \ M(729, ILLEGAL_TIME_SERIES_TAGS) \ + M(730, REFRESH_FAILED) \ \ M(900, DISTRIBUTED_CACHE_ERROR) \ M(901, CANNOT_USE_DISTRIBUTED_CACHE) \ diff --git a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp index 36191b89e86..e9f7816ce73 100644 --- a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp +++ b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp @@ -6,14 +6,18 @@ #include #include #include -#include +#include #include +#include +#include +#include #include #include #include #include #include #include +#include namespace fs = std::filesystem; @@ -26,6 +30,7 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; extern const int BAD_ARGUMENTS; extern const int LOGICAL_ERROR; + extern const int SUPPORT_IS_DISABLED; } static const std::string named_collections_storage_config_path = "named_collections_storage"; @@ -74,9 +79,9 @@ public: }; -class NamedCollectionsMetadataStorage::LocalStorage : public INamedCollectionsStorage, private WithContext +class NamedCollectionsMetadataStorage::LocalStorage : public INamedCollectionsStorage, protected WithContext { -private: +protected: std::string root_path; public: @@ -126,6 +131,11 @@ public: ReadBufferFromFile in(getPath(file_name)); std::string data; readStringUntilEOF(data, in); + return readHook(data); + } + + virtual std::string readHook(const std::string & data) const + { return data; } @@ -142,8 +152,9 @@ public: fs::create_directories(root_path); auto tmp_path = getPath(file_name + ".tmp"); - WriteBufferFromFile 
out(tmp_path, data.size(), O_WRONLY | O_CREAT | O_EXCL); - writeString(data, out); + auto write_data = writeHook(data); + WriteBufferFromFile out(tmp_path, write_data.size(), O_WRONLY | O_CREAT | O_EXCL); + writeString(write_data, out); out.next(); if (getContext()->getSettingsRef().fsync_metadata) @@ -153,6 +164,11 @@ public: fs::rename(tmp_path, getPath(file_name)); } + virtual std::string writeHook(const std::string & data) const + { + return data; + } + void remove(const std::string & file_name) override { if (!removeIfExists(file_name)) @@ -168,7 +184,7 @@ public: return fs::remove(getPath(file_name)); } -private: +protected: std::string getPath(const std::string & file_name) const { const auto file_name_as_path = fs::path(file_name); @@ -178,6 +194,7 @@ private: return fs::path(root_path) / file_name_as_path; } +private: /// Delete .tmp files. They could be left undeleted in case of /// some exception or abrupt server restart. void cleanup() @@ -194,8 +211,7 @@ private: } }; - -class NamedCollectionsMetadataStorage::ZooKeeperStorage : public INamedCollectionsStorage, private WithContext +class NamedCollectionsMetadataStorage::ZooKeeperStorage : public INamedCollectionsStorage, protected WithContext { private: std::string root_path; @@ -275,18 +291,25 @@ public: std::string read(const std::string & file_name) const override { - return getClient()->get(getPath(file_name)); + auto data = getClient()->get(getPath(file_name)); + return readHook(data); + } + + virtual std::string readHook(const std::string & data) const + { + return data; } void write(const std::string & file_name, const std::string & data, bool replace) override { + auto write_data = writeHook(data); if (replace) { - getClient()->createOrUpdate(getPath(file_name), data, zkutil::CreateMode::Persistent); + getClient()->createOrUpdate(getPath(file_name), write_data, zkutil::CreateMode::Persistent); } else { - auto code = getClient()->tryCreate(getPath(file_name), data, zkutil::CreateMode::Persistent); + auto code = getClient()->tryCreate(getPath(file_name), write_data, zkutil::CreateMode::Persistent); if (code == Coordination::Error::ZNODEEXISTS) { @@ -298,6 +321,11 @@ public: } } + virtual std::string writeHook(const std::string & data) const + { + return data; + } + void remove(const std::string & file_name) override { getClient()->remove(getPath(file_name)); @@ -334,6 +362,93 @@ private: } }; +#if USE_SSL + +template +class NamedCollectionsMetadataStorageEncrypted : public BaseMetadataStorage +{ +public: + NamedCollectionsMetadataStorageEncrypted(ContextPtr context_, const std::string & path_) + : BaseMetadataStorage(context_, path_) + { + const auto & config = BaseMetadataStorage::getContext()->getConfigRef(); + auto key_hex = config.getRawString("named_collections_storage.key_hex", ""); + try + { + key = boost::algorithm::unhex(key_hex); + key_fingerprint = FileEncryption::calculateKeyFingerprint(key); + } + catch (const std::exception &) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot read key_hex, check for valid characters [0-9a-fA-F] and length"); + } + + algorithm = FileEncryption::parseAlgorithmFromString(config.getString("named_collections_storage.algorithm", "aes_128_ctr")); + } + + std::string readHook(const std::string & data) const override + { + ReadBufferFromString in(data); + Memory<> encrypted_buffer(data.length()); + + FileEncryption::Header header; + try + { + header.read(in); + } + catch (Exception & e) + { + e.addMessage("While reading the header of encrypted data"); + throw; + } + + size_t 
bytes_read = 0; + while (bytes_read < encrypted_buffer.size() && !in.eof()) + { + bytes_read += in.read(encrypted_buffer.data() + bytes_read, encrypted_buffer.size() - bytes_read); + } + + std::string decrypted_buffer; + decrypted_buffer.resize(bytes_read); + FileEncryption::Encryptor encryptor(header.algorithm, key, header.init_vector); + encryptor.decrypt(encrypted_buffer.data(), bytes_read, decrypted_buffer.data()); + + return decrypted_buffer; + } + + std::string writeHook(const std::string & data) const override + { + FileEncryption::Header header{ + .algorithm = algorithm, + .key_fingerprint = key_fingerprint, + .init_vector = FileEncryption::InitVector::random() + }; + + FileEncryption::Encryptor encryptor(header.algorithm, key, header.init_vector); + WriteBufferFromOwnString out; + header.write(out); + encryptor.encrypt(data.data(), data.size(), out); + return std::string(out.str()); + } + +private: + std::string key; + UInt128 key_fingerprint; + FileEncryption::Algorithm algorithm; +}; + +class NamedCollectionsMetadataStorage::LocalStorageEncrypted : public NamedCollectionsMetadataStorageEncrypted +{ + using NamedCollectionsMetadataStorageEncrypted::NamedCollectionsMetadataStorageEncrypted; +}; + +class NamedCollectionsMetadataStorage::ZooKeeperStorageEncrypted : public NamedCollectionsMetadataStorageEncrypted +{ + using NamedCollectionsMetadataStorageEncrypted::NamedCollectionsMetadataStorageEncrypted; +}; + +#endif + NamedCollectionsMetadataStorage::NamedCollectionsMetadataStorage( std::shared_ptr storage_, ContextPtr context_) @@ -495,7 +610,7 @@ std::unique_ptr NamedCollectionsMetadataStorage const auto & config = context_->getConfigRef(); const auto storage_type = config.getString(named_collections_storage_config_path + ".type", "local"); - if (storage_type == "local") + if (storage_type == "local" || storage_type == "local_encrypted") { const auto path = config.getString( named_collections_storage_config_path + ".path", @@ -504,14 +619,36 @@ std::unique_ptr NamedCollectionsMetadataStorage LOG_TRACE(getLogger("NamedCollectionsMetadataStorage"), "Using local storage for named collections at path: {}", path); - auto local_storage = std::make_unique(context_, path); + std::unique_ptr local_storage; + if (storage_type == "local") + local_storage = std::make_unique(context_, path); + else if (storage_type == "local_encrypted") + { +#if USE_SSL + local_storage = std::make_unique(context_, path); +#else + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Named collections encryption requires building with SSL support"); +#endif + } + return std::unique_ptr( new NamedCollectionsMetadataStorage(std::move(local_storage), context_)); } - if (storage_type == "zookeeper" || storage_type == "keeper") + if (storage_type == "zookeeper" || storage_type == "keeper" || storage_type == "zookeeper_encrypted" || storage_type == "keeper_encrypted") { const auto path = config.getString(named_collections_storage_config_path + ".path"); - auto zk_storage = std::make_unique(context_, path); + + std::unique_ptr zk_storage; + if (!storage_type.ends_with("_encrypted")) + zk_storage = std::make_unique(context_, path); + else + { +#if USE_SSL + zk_storage = std::make_unique(context_, path); +#else + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Named collections encryption requires building with SSL support"); +#endif + } LOG_TRACE(getLogger("NamedCollectionsMetadataStorage"), "Using zookeeper storage for named collections at path: {}", path); diff --git 
a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.h b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.h index c3468fbc468..52805e8359d 100644 --- a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.h +++ b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.h @@ -35,7 +35,9 @@ public: private: class INamedCollectionsStorage; class LocalStorage; + class LocalStorageEncrypted; class ZooKeeperStorage; + class ZooKeeperStorageEncrypted; std::shared_ptr storage; diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index 712cab80aff..2cdb3409487 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -244,33 +244,43 @@ const char * analyzeImpl( is_trivial = false; if (!in_square_braces) { - /// Check for case-insensitive flag. - if (pos + 1 < end && pos[1] == '?') + /// it means flag negation + /// there are various possible flags + /// actually only imsU are supported by re2 + auto is_flag_char = [](char x) { - for (size_t offset = 2; pos + offset < end; ++offset) + return x == '-' || x == 'i' || x == 'm' || x == 's' || x == 'U' || x == 'u'; + }; + /// Check for case-insensitive flag. + if (pos + 2 < end && pos[1] == '?' && is_flag_char(pos[2])) + { + size_t offset = 2; + for (; pos + offset < end; ++offset) { - if (pos[offset] == '-' /// it means flag negation - /// various possible flags, actually only imsU are supported by re2 - || (pos[offset] >= 'a' && pos[offset] <= 'z') - || (pos[offset] >= 'A' && pos[offset] <= 'Z')) + if (pos[offset] == 'i') { - if (pos[offset] == 'i') - { - /// Actually it can be negated case-insensitive flag. But we don't care. - has_case_insensitive_flag = true; - break; - } + /// Actually it can be negated case-insensitive flag. But we don't care. + has_case_insensitive_flag = true; } - else + else if (!is_flag_char(pos[offset])) break; } + pos += offset; + if (pos == end) + return pos; + /// if this group only contains flags, we have nothing to do. + if (*pos == ')') + { + ++pos; + break; + } } /// (?:regex) means non-capturing parentheses group - if (pos + 2 < end && pos[1] == '?' && pos[2] == ':') + else if (pos + 2 < end && pos[1] == '?' && pos[2] == ':') { pos += 2; } - if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<'))) + else if (pos + 3 < end && pos[1] == '?' && (pos[2] == '<' || pos[2] == '\'' || (pos[2] == 'P' && pos[3] == '<'))) { pos = skipNameCapturingGroup(pos, pos[2] == 'P' ? 3: 2, end); } diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ccdce7ff584..d43d9fdcea8 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -209,8 +209,35 @@ \ M(Merge, "Number of launched background merges.") \ M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \ + M(MergedColumns, "Number of columns merged during the horizontal stage of merges.") \ + M(GatheredColumns, "Number of columns gathered during the vertical stage of merges.") \ M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. 
This is the number before merge.") \ - M(MergesTimeMilliseconds, "Total time spent for background merges.")\ + M(MergeTotalMilliseconds, "Total time spent for background merges") \ + M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \ + M(MergeHorizontalStageTotalMilliseconds, "Total time spent for horizontal stage of background merges") \ + M(MergeHorizontalStageExecuteMilliseconds, "Total busy time spent for execution of horizontal stage of background merges") \ + M(MergeVerticalStageTotalMilliseconds, "Total time spent for vertical stage of background merges") \ + M(MergeVerticalStageExecuteMilliseconds, "Total busy time spent for execution of vertical stage of background merges") \ + M(MergeProjectionStageTotalMilliseconds, "Total time spent for projection stage of background merges") \ + M(MergeProjectionStageExecuteMilliseconds, "Total busy time spent for execution of projection stage of background merges") \ + \ + M(MergingSortedMilliseconds, "Total time spent while merging sorted columns") \ + M(AggregatingSortedMilliseconds, "Total time spent while aggregating sorted columns") \ + M(CollapsingSortedMilliseconds, "Total time spent while collapsing sorted columns") \ + M(ReplacingSortedMilliseconds, "Total time spent while replacing sorted columns") \ + M(SummingSortedMilliseconds, "Total time spent while summing sorted columns") \ + M(VersionedCollapsingSortedMilliseconds, "Total time spent while version collapsing sorted columns") \ + M(GatheringColumnMilliseconds, "Total time spent while gathering columns for vertical merge") \ + \ + M(MutationTotalParts, "Number of total parts for which mutations tried to be applied") \ + M(MutationUntouchedParts, "Number of total parts for which mutations tried to be applied but which was completely skipped according to predicate") \ + M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \ + M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for mutations. 
This is the number before mutation.") \ + M(MutationTotalMilliseconds, "Total time spent for mutations.") \ + M(MutationExecuteMilliseconds, "Total busy time spent for execution of mutations.") \ + M(MutationAllPartColumns, "Number of times when task to mutate all columns in part was created") \ + M(MutationSomePartColumns, "Number of times when task to mutate some columns in part was created") \ + M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections in mutations.") \ \ M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \ M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \ @@ -225,7 +252,6 @@ M(MergeTreeDataWriterProjectionsCalculationMicroseconds, "Time spent calculating projections") \ M(MergeTreeDataProjectionWriterSortingBlocksMicroseconds, "Time spent sorting blocks (for projection it might be a key different from table's sorting key)") \ M(MergeTreeDataProjectionWriterMergingBlocksMicroseconds, "Time spent merging blocks") \ - M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \ \ M(InsertedWideParts, "Number of parts inserted in Wide format.") \ M(InsertedCompactParts, "Number of parts inserted in Compact format.") \ diff --git a/src/Common/ProxyConfigurationResolverProvider.cpp b/src/Common/ProxyConfigurationResolverProvider.cpp index b06073121e7..a46837bfdb9 100644 --- a/src/Common/ProxyConfigurationResolverProvider.cpp +++ b/src/Common/ProxyConfigurationResolverProvider.cpp @@ -112,9 +112,8 @@ namespace return configuration.has(config_prefix + ".uri"); } - /* - * New syntax requires protocol prefix " or " - * */ + /* New syntax requires protocol prefix " or " + */ std::optional getProtocolPrefix( ProxyConfiguration::Protocol request_protocol, const String & config_prefix, @@ -130,22 +129,18 @@ namespace return protocol_prefix; } - template std::optional calculatePrefixBasedOnSettingsSyntax( + bool new_syntax, ProxyConfiguration::Protocol request_protocol, const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration ) { if (!configuration.has(config_prefix)) - { return std::nullopt; - } - if constexpr (new_syntax) - { + if (new_syntax) return getProtocolPrefix(request_protocol, config_prefix, configuration); - } return config_prefix; } @@ -155,24 +150,21 @@ std::shared_ptr ProxyConfigurationResolverProvider:: Protocol request_protocol, const Poco::Util::AbstractConfiguration & configuration) { - if (auto resolver = getFromSettings(request_protocol, "proxy", configuration)) - { + if (auto resolver = getFromSettings(true, request_protocol, "proxy", configuration)) return resolver; - } return std::make_shared( request_protocol, isTunnelingDisabledForHTTPSRequestsOverHTTPProxy(configuration)); } -template std::shared_ptr ProxyConfigurationResolverProvider::getFromSettings( + bool new_syntax, Protocol request_protocol, const String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration -) + const Poco::Util::AbstractConfiguration & configuration) { - auto prefix_opt = calculatePrefixBasedOnSettingsSyntax(request_protocol, config_prefix, configuration); + auto prefix_opt = calculatePrefixBasedOnSettingsSyntax(new_syntax, request_protocol, config_prefix, configuration); if (!prefix_opt) { @@ -195,20 +187,17 @@ std::shared_ptr ProxyConfigurationResolverProvider:: std::shared_ptr ProxyConfigurationResolverProvider::getFromOldSettingsFormat( Protocol request_protocol, const 
String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration -) + const Poco::Util::AbstractConfiguration & configuration) { - /* - * First try to get it from settings only using the combination of config_prefix and configuration. + /* First try to get it from settings only using the combination of config_prefix and configuration. * This logic exists for backward compatibility with old S3 storage specific proxy configuration. * */ - if (auto resolver = ProxyConfigurationResolverProvider::getFromSettings(request_protocol, config_prefix + ".proxy", configuration)) + if (auto resolver = ProxyConfigurationResolverProvider::getFromSettings(false, request_protocol, config_prefix + ".proxy", configuration)) { return resolver; } - /* - * In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings. + /* In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings. * Falls back to Environment resolver if no configuration is found. * */ return ProxyConfigurationResolverProvider::get(request_protocol, configuration); diff --git a/src/Common/ProxyConfigurationResolverProvider.h b/src/Common/ProxyConfigurationResolverProvider.h index ebf22f7e92a..357b218e499 100644 --- a/src/Common/ProxyConfigurationResolverProvider.h +++ b/src/Common/ProxyConfigurationResolverProvider.h @@ -33,12 +33,11 @@ public: ); private: - template static std::shared_ptr getFromSettings( + bool is_new_syntax, Protocol protocol, const String & config_prefix, - const Poco::Util::AbstractConfiguration & configuration - ); + const Poco::Util::AbstractConfiguration & configuration); }; } diff --git a/src/Common/QueryProfiler.cpp b/src/Common/QueryProfiler.cpp index 746010b5462..85c92ec292d 100644 --- a/src/Common/QueryProfiler.cpp +++ b/src/Common/QueryProfiler.cpp @@ -110,7 +110,7 @@ namespace errno = saved_errno; } - [[maybe_unused]] constexpr UInt32 TIMER_PRECISION = 1e9; + [[maybe_unused]] constexpr UInt64 TIMER_PRECISION = 1e9; } namespace ErrorCodes @@ -167,18 +167,18 @@ void Timer::createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal } } -void Timer::set(UInt32 period) +void Timer::set(UInt64 period) { /// Too high frequency can introduce infinite busy loop of signal handlers. We will limit maximum frequency (with 1000 signals per second). - period = std::max(period, 1000000); + period = std::max(period, 1000000); /// Randomize offset as uniform random value from 0 to period - 1. /// It will allow to sample short queries even if timer period is large. /// (For example, with period of 1 second, query with 50 ms duration will be sampled with 1 / 20 probability). /// It also helps to avoid interference (moire). 
- UInt32 period_rand = std::uniform_int_distribution(0, period)(thread_local_rng); + UInt64 period_rand = std::uniform_int_distribution(0, period)(thread_local_rng); - struct timespec interval{.tv_sec = period / TIMER_PRECISION, .tv_nsec = period % TIMER_PRECISION}; - struct timespec offset{.tv_sec = period_rand / TIMER_PRECISION, .tv_nsec = period_rand % TIMER_PRECISION}; + struct timespec interval{.tv_sec = time_t(period / TIMER_PRECISION), .tv_nsec = int64_t(period % TIMER_PRECISION)}; + struct timespec offset{.tv_sec = time_t(period_rand / TIMER_PRECISION), .tv_nsec = int64_t(period_rand % TIMER_PRECISION)}; struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset}; if (timer_settime(*timer_id, 0, &timer_spec, nullptr)) @@ -229,7 +229,7 @@ void Timer::cleanup() template QueryProfilerBase::QueryProfilerBase( - [[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt32 period, [[maybe_unused]] int pause_signal_) + [[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt64 period, [[maybe_unused]] int pause_signal_) : log(getLogger("QueryProfiler")), pause_signal(pause_signal_) { #if defined(SANITIZER) @@ -270,7 +270,7 @@ QueryProfilerBase::QueryProfilerBase( template -void QueryProfilerBase::setPeriod([[maybe_unused]] UInt32 period_) +void QueryProfilerBase::setPeriod([[maybe_unused]] UInt64 period_) { #if defined(SANITIZER) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers"); @@ -307,7 +307,7 @@ void QueryProfilerBase::cleanup() template class QueryProfilerBase; template class QueryProfilerBase; -QueryProfilerReal::QueryProfilerReal(UInt64 thread_id, UInt32 period) +QueryProfilerReal::QueryProfilerReal(UInt64 thread_id, UInt64 period) : QueryProfilerBase(thread_id, CLOCK_MONOTONIC, period, SIGUSR1) {} @@ -320,7 +320,7 @@ void QueryProfilerReal::signalHandler(int sig, siginfo_t * info, void * context) writeTraceInfo(TraceType::Real, sig, info, context); } -QueryProfilerCPU::QueryProfilerCPU(UInt64 thread_id, UInt32 period) +QueryProfilerCPU::QueryProfilerCPU(UInt64 thread_id, UInt64 period) : QueryProfilerBase(thread_id, CLOCK_THREAD_CPUTIME_ID, period, SIGUSR2) {} diff --git a/src/Common/QueryProfiler.h b/src/Common/QueryProfiler.h index ea4cc73bca6..e3ab0b2e094 100644 --- a/src/Common/QueryProfiler.h +++ b/src/Common/QueryProfiler.h @@ -40,7 +40,7 @@ public: ~Timer(); void createIfNecessary(UInt64 thread_id, int clock_type, int pause_signal); - void set(UInt32 period); + void set(UInt64 period); void stop(); void cleanup(); @@ -54,10 +54,10 @@ template class QueryProfilerBase { public: - QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_); + QueryProfilerBase(UInt64 thread_id, int clock_type, UInt64 period, int pause_signal_); ~QueryProfilerBase(); - void setPeriod(UInt32 period_); + void setPeriod(UInt64 period_); private: void cleanup(); @@ -76,7 +76,7 @@ private: class QueryProfilerReal : public QueryProfilerBase { public: - QueryProfilerReal(UInt64 thread_id, UInt32 period); /// NOLINT + QueryProfilerReal(UInt64 thread_id, UInt64 period); /// NOLINT static void signalHandler(int sig, siginfo_t * info, void * context); }; @@ -85,7 +85,7 @@ public: class QueryProfilerCPU : public QueryProfilerBase { public: - QueryProfilerCPU(UInt64 thread_id, UInt32 period); /// NOLINT + QueryProfilerCPU(UInt64 thread_id, UInt64 period); /// NOLINT static void signalHandler(int sig, siginfo_t * info, void * context); }; 
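Note on the UInt32 -> UInt64 widening in the QueryProfiler changes above: with TIMER_PRECISION = 1e9 (nanoseconds), a UInt32 period overflows for sampling intervals above roughly 4.29 seconds, which is why the period, the random offset, and the timespec split are now computed in 64 bits with explicit narrowing casts. Below is a minimal, self-contained sketch of that arithmetic under those assumptions; it is not the ClickHouse Timer code, and the helper name splitPeriod is invented for illustration.

#include <cstdint>
#include <ctime>
#include <algorithm>
#include <random>
#include <cstdio>

constexpr uint64_t TIMER_PRECISION = 1'000'000'000; // nanoseconds per second

// Split a nanosecond period into seconds / nanoseconds with explicit casts,
// mirroring how the patched Timer::set fills struct timespec.
std::timespec splitPeriod(uint64_t ns)
{
    std::timespec ts;
    ts.tv_sec = time_t(ns / TIMER_PRECISION);
    ts.tv_nsec = long(ns % TIMER_PRECISION);
    return ts;
}

int main()
{
    uint64_t period = 10 * TIMER_PRECISION;          // 10 s: fits in UInt64, would overflow UInt32
    period = std::max<uint64_t>(period, 1'000'000);  // cap at 1000 signals per second, as the patch does
    std::mt19937_64 rng{42};
    // Random offset in [0, period] spreads the first signal, as the comment in the patch explains.
    uint64_t offset = std::uniform_int_distribution<uint64_t>(0, period)(rng);
    std::timespec interval = splitPeriod(period);
    std::timespec first = splitPeriod(offset);
    std::printf("interval %lld.%09ld s, first signal after %lld.%09ld s\n",
                (long long) interval.tv_sec, interval.tv_nsec,
                (long long) first.tv_sec, first.tv_nsec);
    return 0;
}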
diff --git a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp index 01aa7df48d3..6b9f6318903 100644 --- a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp +++ b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp @@ -184,14 +184,20 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi // Resource update leads to loss of runtime data of nodes and may lead to temporary violation of constraints (e.g. limits) // Try to minimise this by reusing "equal" resources (initialized with the same configuration). + std::vector resources_to_attach; for (auto & [name, new_resource] : new_state->resources) { if (auto iter = state->resources.find(name); iter != state->resources.end()) // Resource update { State::ResourcePtr old_resource = iter->second; if (old_resource->equals(*new_resource)) + { new_resource = old_resource; // Rewrite with older version to avoid loss of runtime data + continue; + } } + // It is new or updated resource + resources_to_attach.emplace_back(new_resource); } // Commit new state @@ -199,17 +205,14 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi state = new_state; // Attach new and updated resources to the scheduler - for (auto & [name, resource] : new_state->resources) + for (auto & resource : resources_to_attach) { const SchedulerNodePtr & root = resource->nodes.find("/")->second.ptr; - if (root->parent == nullptr) + resource->attached_to = &scheduler; + scheduler.event_queue->enqueue([this, root] { - resource->attached_to = &scheduler; - scheduler.event_queue->enqueue([this, root] - { - scheduler.attachChild(root); - }); - } + scheduler.attachChild(root); + }); } // NOTE: after mutex unlock `state` became available for Classifier(s) and must be immutable diff --git a/src/Common/SignalHandlers.cpp b/src/Common/SignalHandlers.cpp index c4358da2453..6ac6cbcae29 100644 --- a/src/Common/SignalHandlers.cpp +++ b/src/Common/SignalHandlers.cpp @@ -18,13 +18,17 @@ namespace DB { + namespace ErrorCodes { extern const int CANNOT_SET_SIGNAL_HANDLER; extern const int CANNOT_SEND_SIGNAL; } + } +extern const char * GIT_HASH; + using namespace DB; @@ -334,7 +338,7 @@ void SignalListener::onTerminate(std::string_view message, UInt32 thread_num) co size_t pos = message.find('\n'); LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) {}", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", thread_num, message.substr(0, pos)); + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, message.substr(0, pos)); /// Print trace from std::terminate exception line-by-line to make it easy for grep. while (pos != std::string_view::npos) @@ -368,7 +372,7 @@ try LOG_FATAL(log, "########## Short fault info ############"); LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {}", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, sig); std::string signal_description = "Unknown signal"; @@ -434,13 +438,13 @@ try if (query_id.empty()) { LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (no query) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? 
daemon->build_id : "", GIT_HASH, thread_num, signal_description, sig); } else { LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) (query_id: {}) (query: {}) Received signal {} ({})", - VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", daemon ? daemon->git_hash : "", + VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, thread_num, query_id, query, signal_description, sig); } diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index 76277cbc993..bd01b639913 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -248,8 +248,31 @@ void StackTrace::forEachFrame( auto dwarf_it = dwarfs.try_emplace(object->name, object->elf).first; DB::Dwarf::LocationInfo location; - if (dwarf_it->second.findAddress( - uintptr_t(current_frame.physical_addr), location, mode, inline_frames)) + uintptr_t adjusted_addr = uintptr_t(current_frame.physical_addr); + if (i > 0) + { + /// For non-innermost stack frames, the address points to the *next* instruction + /// after the `call` instruction. But we want the line number and inline function + /// information for the `call` instruction. So subtract 1 from the address. + /// Caveats: + /// * The `call` instruction can be longer than 1 byte, so addr-1 is in the middle + /// of the instruction. That's ok for debug info lookup: address ranges in debug + /// info cover the whole instruction. + /// * If the stack trace unwound out of a signal handler, the stack frame just + /// outside the signal didn't do a function call. It was interrupted by signal. + /// There's no `call` instruction, and decrementing the address is incorrect. + /// We may get incorrect line number and inlined functions in this case. + /// Unfortunate. + /// Note that libunwind, when producing this stack trace, knows whether this + /// frame is interrupted by signal or not. We could propagate this information + /// from libunwind to here and avoid subtracting 1 in this case, but currently + /// we don't do this. + /// But we don't do the decrement for findSymbol below (because `call` is + /// ~never the last instruction of a function), so the function name should be + /// correct for both pre-signal frames and regular frames. 
+ adjusted_addr -= 1; + } + if (dwarf_it->second.findAddress(adjusted_addr, location, mode, inline_frames)) { current_frame.file = location.file.toString(); current_frame.line = location.line; diff --git a/src/Common/StatusFile.cpp b/src/Common/StatusFile.cpp index 80464f38082..0bbb7ff411d 100644 --- a/src/Common/StatusFile.cpp +++ b/src/Common/StatusFile.cpp @@ -51,7 +51,7 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_) std::string contents; { ReadBufferFromFile in(path, 1024); - LimitReadBuffer limit_in(in, 1024, /* trow_exception */ false, /* exact_limit */ {}); + LimitReadBuffer limit_in(in, 1024, /* throw_exception */ false, /* exact_limit */ {}); readStringUntilEOF(contents, limit_in); } diff --git a/src/Common/StringHashForHeterogeneousLookup.h b/src/Common/StringHashForHeterogeneousLookup.h new file mode 100644 index 00000000000..56d8ccf0009 --- /dev/null +++ b/src/Common/StringHashForHeterogeneousLookup.h @@ -0,0 +1,30 @@ +#pragma once +#include + +namespace DB +{ + +/// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0919r3.html +struct StringHashForHeterogeneousLookup +{ + using hash_type = std::hash; + using transparent_key_equal = std::equal_to<>; + using is_transparent = void; // required to make find() work with different type than key_type + + auto operator()(const std::string_view view) const + { + return hash_type()(view); + } + + auto operator()(const std::string & str) const + { + return hash_type()(str); + } + + auto operator()(const char * data) const + { + return hash_type()(data); + } +}; + +} diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp index 064ac2261ec..1a9ed4f1ee7 100644 --- a/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/src/Common/ZooKeeper/ZooKeeper.cpp @@ -1570,7 +1570,7 @@ size_t getFailedOpIndex(Coordination::Error exception_code, const Coordination:: KeeperMultiException::KeeperMultiException(Coordination::Error exception_code, size_t failed_op_index_, const Coordination::Requests & requests_, const Coordination::Responses & responses_) - : KeeperException(exception_code, "Transaction failed: Op #{}, path", failed_op_index_), + : KeeperException(exception_code, "Transaction failed ({}): Op #{}, path", exception_code, failed_op_index_), requests(requests_), responses(responses_), failed_op_index(failed_op_index_) { addMessage(getPathForFirstFailedOp()); diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h index 657c9cb2c03..7ccdc9d1b7f 100644 --- a/src/Common/ZooKeeper/ZooKeeper.h +++ b/src/Common/ZooKeeper/ZooKeeper.h @@ -44,7 +44,7 @@ namespace ErrorCodes namespace zkutil { -/// Preferred size of multi() command (in number of ops) +/// Preferred size of multi command (in the number of operations) constexpr size_t MULTI_BATCH_SIZE = 100; struct ShuffleHost diff --git a/src/Common/examples/CMakeLists.txt b/src/Common/examples/CMakeLists.txt index 69580d4ad0e..8383e80d09d 100644 --- a/src/Common/examples/CMakeLists.txt +++ b/src/Common/examples/CMakeLists.txt @@ -92,3 +92,8 @@ endif() clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp) target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io clickhouse_common_config) + +if (TARGET ch_contrib::icu) + clickhouse_add_executable (utf8_upper_lower utf8_upper_lower.cpp) + target_link_libraries (utf8_upper_lower PRIVATE ch_contrib::icu) +endif () diff --git a/src/Common/examples/utf8_upper_lower.cpp b/src/Common/examples/utf8_upper_lower.cpp new file mode 100644 index 
00000000000..826e1763105 --- /dev/null +++ b/src/Common/examples/utf8_upper_lower.cpp @@ -0,0 +1,27 @@ +#include +#include + +std::string utf8_to_lower(const std::string & input) +{ + icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); + unicodeInput.toLower(); + std::string output; + unicodeInput.toUTF8String(output); + return output; +} + +std::string utf8_to_upper(const std::string & input) +{ + icu::UnicodeString unicodeInput(input.c_str(), "UTF-8"); + unicodeInput.toUpper(); + std::string output; + unicodeInput.toUTF8String(output); + return output; +} + +int main() +{ + std::string input = "ır"; + std::cout << "upper:" << utf8_to_upper(input) << std::endl; + return 0; +} diff --git a/src/Common/parseRemoteDescription.cpp b/src/Common/parseRemoteDescription.cpp index df3820b11f9..6a53098362d 100644 --- a/src/Common/parseRemoteDescription.cpp +++ b/src/Common/parseRemoteDescription.cpp @@ -79,11 +79,16 @@ std::vector parseRemoteDescription( /// Look for the corresponding closing bracket for (m = i + 1; m < r; ++m) { - if (description[m] == '{') ++cnt; - if (description[m] == '}') --cnt; - if (description[m] == '.' && description[m-1] == '.') last_dot = m; - if (description[m] == separator) have_splitter = true; - if (cnt == 0) break; + if (description[m] == '{') + ++cnt; + if (description[m] == '}') + --cnt; + if (description[m] == '.' && description[m-1] == '.') + last_dot = m; + if (description[m] == separator) + have_splitter = true; + if (cnt == 0) + break; } if (cnt != 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function '{}': incorrect brace sequence in first argument", func_name); diff --git a/src/Common/tests/gtest_optimize_re.cpp b/src/Common/tests/gtest_optimize_re.cpp index a9fcb918b24..d6735c3ccfe 100644 --- a/src/Common/tests/gtest_optimize_re.cpp +++ b/src/Common/tests/gtest_optimize_re.cpp @@ -19,6 +19,9 @@ TEST(OptimizeRE, analyze) }; test_f("abc", "abc", {}, true, true); test_f("c([^k]*)de", ""); + test_f("(?-s)bob", "bob", {}, false, true); + test_f("(?s)bob", "bob", {}, false, true); + test_f("(?ssss", ""); test_f("abc(de)fg", "abcdefg", {}, false, true); test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"}, false, true); test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"}, false, true); diff --git a/src/Coordination/KeeperSnapshotManager.cpp b/src/Coordination/KeeperSnapshotManager.cpp index 3f5ac055470..2ed89c414ff 100644 --- a/src/Coordination/KeeperSnapshotManager.cpp +++ b/src/Coordination/KeeperSnapshotManager.cpp @@ -54,7 +54,7 @@ namespace std::filesystem::path path(snapshot_path); std::string filename = path.stem(); Strings name_parts; - splitInto<'_'>(name_parts, filename); + splitInto<'_', '.'>(name_parts, filename); return parse(name_parts[1]); } diff --git a/src/Coordination/RaftServerConfig.cpp b/src/Coordination/RaftServerConfig.cpp index 929eeeb640e..bafc177b736 100644 --- a/src/Coordination/RaftServerConfig.cpp +++ b/src/Coordination/RaftServerConfig.cpp @@ -26,12 +26,16 @@ std::optional RaftServerConfig::parse(std::string_view server) if (!with_id_endpoint && !with_server_type && !with_priority) return std::nullopt; - const std::string_view id_str = parts[0]; + std::string_view id_str = parts[0]; if (!id_str.starts_with("server.")) return std::nullopt; + id_str = id_str.substr(7); + if (auto eq_pos = id_str.find('='); std::string_view::npos != eq_pos) + id_str = id_str.substr(0, eq_pos); + Int32 id; - if (!tryParse(id, std::next(id_str.begin(), 7))) + if (!tryParse(id, id_str)) return std::nullopt; if (id <= 0) return std::nullopt; 
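The RaftServerConfig::parse change above makes the server-id parsing tolerant of tokens of the form `server.N=endpoint`: the `server.` prefix is stripped, everything from the first '=' onwards is discarded, and only then is the id parsed and required to be positive. The following is a small stand-alone sketch of that string handling (C++20 assumed); the function name extractServerId is hypothetical, and only the parsing steps mirror the diff.

#include <charconv>
#include <optional>
#include <string_view>
#include <cassert>

std::optional<int> extractServerId(std::string_view token)
{
    constexpr std::string_view prefix = "server.";
    if (!token.starts_with(prefix))
        return std::nullopt;
    token.remove_prefix(prefix.size());
    // Keep only the part before '=', e.g. "server.2=localhost:9234" -> "2".
    if (auto eq_pos = token.find('='); eq_pos != std::string_view::npos)
        token = token.substr(0, eq_pos);
    int id = 0;
    auto [ptr, ec] = std::from_chars(token.data(), token.data() + token.size(), id);
    if (ec != std::errc() || ptr != token.data() + token.size() || id <= 0)
        return std::nullopt;
    return id;
}

int main()
{
    assert(extractServerId("server.2=localhost:9234") == 2); // '=' tail is ignored
    assert(extractServerId("server.3") == 3);                // plain "server.N" still works
    assert(!extractServerId("server.0"));                    // non-positive ids are rejected
    assert(!extractServerId("node.1"));                      // wrong prefix
    return 0;
}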
diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp index c2bcf6ec651..4ff0d7092d8 100644 --- a/src/Core/ExternalTable.cpp +++ b/src/Core/ExternalTable.cpp @@ -85,7 +85,7 @@ void BaseExternalTable::parseStructureFromStructureField(const std::string & arg /// We use `formatWithPossiblyHidingSensitiveData` instead of `getColumnNameWithoutAlias` because `column->type` is an ASTFunction. /// `getColumnNameWithoutAlias` will return name of the function with `(arguments)` even if arguments is empty. if (column) - structure.emplace_back(column->name, column->type->formatWithPossiblyHidingSensitiveData(0, true, true)); + structure.emplace_back(column->name, column->type->formatWithPossiblyHidingSensitiveData(0, true, true, false)); else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: expected column definition, got {}", child->formatForErrorMessage()); } @@ -102,7 +102,7 @@ void BaseExternalTable::parseStructureFromTypesField(const std::string & argumen throw Exception(ErrorCodes::BAD_ARGUMENTS, "Error while parsing table structure: {}", error); for (size_t i = 0; i < type_list_raw->children.size(); ++i) - structure.emplace_back("_" + toString(i + 1), type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(0, true, true)); + structure.emplace_back("_" + toString(i + 1), type_list_raw->children[i]->formatWithPossiblyHidingSensitiveData(0, true, true, false)); } void BaseExternalTable::initSampleBlock() diff --git a/src/Core/MySQL/MySQLGtid.cpp b/src/Core/MySQL/MySQLGtid.cpp index 7916f882979..28b583a0cfe 100644 --- a/src/Core/MySQL/MySQLGtid.cpp +++ b/src/Core/MySQL/MySQLGtid.cpp @@ -24,9 +24,7 @@ void GTIDSet::tryMerge(size_t i) void GTIDSets::parse(String gtid_format) { if (gtid_format.empty()) - { return; - } std::vector gtid_sets; boost::split(gtid_sets, gtid_format, [](char c) { return c == ','; }); diff --git a/src/Core/MySQL/tests/gtest_MySQLGtid.cpp b/src/Core/MySQL/tests/gtest_MySQLGtid.cpp index e31a87aaa39..e5a2fe44e5c 100644 --- a/src/Core/MySQL/tests/gtest_MySQLGtid.cpp +++ b/src/Core/MySQL/tests/gtest_MySQLGtid.cpp @@ -10,20 +10,19 @@ GTEST_TEST(GTIDSetsContains, Tests) contained1, contained2, contained3, contained4, contained5, not_contained1, not_contained2, not_contained3, not_contained4, not_contained5, not_contained6; - gtid_set.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); - contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); + gtid_set.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); + contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); contained2.parse("2174B383-5441-11E8-B90A-C80AA9429562:2-3:11:47-49"); contained3.parse("2174B383-5441-11E8-B90A-C80AA9429562:11"); - contained4.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:2-16:47-49:60"); - contained5.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:60"); + contained4.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:2-16:47-49:60"); + contained5.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:60"); - not_contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-50, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60"); + not_contained1.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-50, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60"); 
not_contained2.parse("2174B383-5441-11E8-B90A-C80AA9429562:0-3:11:47-49"); not_contained3.parse("2174B383-5441-11E8-B90A-C80AA9429562:99"); - not_contained4.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:2-16:46-49:60"); - not_contained5.parse("24DA167-0C0C-11E8-8442-00059A3C7B00:99"); - not_contained6.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, 24DA167-0C0C-11E8-8442-00059A3C7B00:1-19:47-49:60, 00000000-0000-0000-0000-000000000000"); - + not_contained4.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:2-16:46-49:60"); + not_contained5.parse("FBC30C64-F8C9-4DDF-8CDD-066208EB433B:99"); + not_contained6.parse("2174B383-5441-11E8-B90A-C80AA9429562:1-3:11:47-49, FBC30C64-F8C9-4DDF-8CDD-066208EB433B:1-19:47-49:60, 00000000-0000-0000-0000-000000000000"); ASSERT_TRUE(gtid_set.contains(contained1)); ASSERT_TRUE(gtid_set.contains(contained2)); diff --git a/src/Core/PostgreSQL/PoolWithFailover.cpp b/src/Core/PostgreSQL/PoolWithFailover.cpp index 5014564dbe0..054fc3b2226 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.cpp +++ b/src/Core/PostgreSQL/PoolWithFailover.cpp @@ -23,7 +23,7 @@ namespace postgres { PoolWithFailover::PoolWithFailover( - const DB::ExternalDataSourcesConfigurationByPriority & configurations_by_priority, + const ReplicasConfigurationByPriority & configurations_by_priority, size_t pool_size, size_t pool_wait_timeout_, size_t max_tries_, diff --git a/src/Core/PostgreSQL/PoolWithFailover.h b/src/Core/PostgreSQL/PoolWithFailover.h index 502a9a9b7d7..2237c752367 100644 --- a/src/Core/PostgreSQL/PoolWithFailover.h +++ b/src/Core/PostgreSQL/PoolWithFailover.h @@ -8,7 +8,6 @@ #include "ConnectionHolder.h" #include #include -#include #include @@ -20,12 +19,12 @@ namespace postgres class PoolWithFailover { - -using RemoteDescription = std::vector>; - public: + using ReplicasConfigurationByPriority = std::map>; + using RemoteDescription = std::vector>; + PoolWithFailover( - const DB::ExternalDataSourcesConfigurationByPriority & configurations_by_priority, + const ReplicasConfigurationByPriority & configurations_by_priority, size_t pool_size, size_t pool_wait_timeout, size_t max_tries_, diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 02d54221ed3..790987272fa 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -83,6 +83,9 @@ static constexpr auto DBMS_MIN_REVISION_WITH_SYSTEM_KEYWORDS_TABLE = 54468; static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469; +/// Packets size header +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54470; + /// Version of ClickHouse TCP protocol. /// /// Should be incremented manually on protocol changes. @@ -90,6 +93,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469; /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). 
-static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54469; +static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54470; } diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 256f78ddea0..79173503f28 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -106,6 +106,8 @@ namespace DB M(UInt64, dns_cache_max_entries, 10000, "Internal DNS cache max entries.", 0) \ M(Int32, dns_cache_update_period, 15, "Internal DNS cache update period in seconds.", 0) \ M(UInt32, dns_max_consecutive_failures, 10, "Max DNS resolve failures of a hostname before dropping the hostname from ClickHouse DNS cache.", 0) \ + M(Bool, dns_allow_resolve_names_to_ipv4, true, "Allows resolve names to ipv4 addresses.", 0) \ + M(Bool, dns_allow_resolve_names_to_ipv6, true, "Allows resolve names to ipv6 addresses.", 0) \ \ M(UInt64, max_table_size_to_drop, 50000000000lu, "If size of a table is greater than this value (in bytes) than table could not be dropped with any DROP query.", 0) \ M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \ diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0808e8eb49f..479d5939b57 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -325,6 +325,7 @@ class IColumn; \ M(Bool, join_use_nulls, false, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.", IMPORTANT) \ \ + M(Int32, join_output_by_rowlist_perkey_rows_threshold, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join.", 0) \ M(JoinStrictness, join_default_strictness, JoinStrictness::All, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.", 0) \ M(Bool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. ANY RIGHT JOIN needs one-to-many keys mapping to be consistent with LEFT one.", IMPORTANT) \ M(Bool, single_join_prefer_left_table, true, "For single JOIN in case of identifier ambiguity prefer left table", IMPORTANT) \ @@ -593,7 +594,6 @@ class IColumn; M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \ M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \ M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete.", 0) \ M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. 
This is useful for debugging and \"undelete\" scenarios", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ @@ -616,6 +616,7 @@ class IColumn; M(Bool, throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert, true, "Throw exception on INSERT query when the setting `deduplicate_blocks_in_dependent_materialized_views` is enabled along with `async_insert`. It guarantees correctness, because these features can't work together.", 0) \ M(Bool, materialized_views_ignore_errors, false, "Allows to ignore errors for MATERIALIZED VIEW, and deliver original block to the table regardless of MVs", 0) \ M(Bool, ignore_materialized_views_with_dropped_target_table, false, "Ignore MVs with dropped target table during pushing to views", 0) \ + M(Bool, allow_materialized_view_with_bad_select, true, "Allow CREATE MATERIALIZED VIEW with SELECT query that references nonexistent tables or columns. It must still be syntactically valid. Doesn't apply to refreshable MVs. Doesn't apply if the MV schema needs to be inferred from the SELECT query (i.e. if the CREATE has no column list and no TO table). Can be used for creating MV before its source table.", 0) \ M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \ M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \ M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \ @@ -878,9 +879,10 @@ class IColumn; M(Bool, allow_get_client_http_header, false, "Allow to use the function `getClientHTTPHeader` which lets to obtain a value of an the current HTTP request's header. It is not enabled by default for security reasons, because some headers, such as `Cookie`, could contain sensitive info. Note that the `X-ClickHouse-*` and `Authentication` headers are always restricted and cannot be obtained with this function.", 0) \ M(Bool, cast_string_to_dynamic_use_inference, false, "Use types inference during String to Dynamic conversion", 0) \ M(Bool, enable_blob_storage_log, true, "Write information about blob storage operations to system.blob_storage_log table", 0) \ + M(Bool, use_json_alias_for_old_object_type, false, "When enabled, JSON type alias will create old experimental Object type instead of a new JSON type", 0) \ M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \ M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. 
Made for SQL compatibility tests.", 0) \ - M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \ + M(Bool, print_pretty_type_names, true, "Print pretty type names in the DESCRIBE query and `toTypeName` function, as well as in the `SHOW CREATE TABLE` query and the `formatQuery` function.", 0) \ M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \ M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0) \ M(SQLSecurityType, default_normal_view_sql_security, SQLSecurityType::INVOKER, "Allows to set a default value for SQL SECURITY option when creating a normal view.", 0) \ @@ -897,6 +899,7 @@ class IColumn; M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \ M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \ M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \ + M(Bool, create_if_not_exists, false, "Enable IF NOT EXISTS for CREATE statements by default", 0) \ \ \ /* ###################################### */ \ @@ -911,6 +914,7 @@ class IColumn; M(Bool, allow_experimental_vector_similarity_index, false, "Allow experimental vector similarity index", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ + M(Bool, allow_experimental_json_type, false, "Allow JSON data type", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ M(UInt64, max_limit_for_ann_queries, 1'000'000, "SELECT queries with LIMIT bigger than this setting cannot use ANN indexes. Helps to prevent memory overflows in ANN search indexes.", 0) \ M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \ @@ -1132,10 +1136,13 @@ class IColumn; M(Bool, input_format_json_defaults_for_missing_elements_in_named_tuple, true, "Insert default value in named tuple element if it's missing in json object", 0) \ M(Bool, input_format_json_throw_on_bad_escape_sequence, true, "Throw an exception if JSON string contains bad escape sequence in JSON input formats. If disabled, bad escape sequences will remain as is in the data", 0) \ M(Bool, input_format_json_ignore_unnecessary_fields, true, "Ignore unnecessary fields and not parse them. Enabling this may not throw exceptions on json strings of invalid format or with duplicated fields", 0) \ + M(Bool, input_format_try_infer_variants, false, "Try to infer the Variant type in text formats when there is more than one possible type for column/array elements", 0) \ + M(Bool, type_json_skip_duplicated_paths, false, "When enabled, during parsing JSON object into JSON type duplicated paths will be ignored and only the first one will be inserted instead of an exception", 0) \ M(UInt64, input_format_json_max_depth, 1000, "Maximum depth of a field in JSON. 
This is not a strict limit, it does not have to be applied precisely.", 0) \ M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields while schema inference in text formats", 0) \ M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields while schema inference in text formats", 0) \ + M(Bool, input_format_try_infer_datetimes_only_datetime64, false, "When input_format_try_infer_datetimes is enabled, infer only DateTime64 but not DateTime types", 0) \ M(Bool, input_format_try_infer_exponent_floats, false, "Try to infer floats in exponential notation while schema inference in text formats (except JSON, where exponent numbers are always inferred)", 0) \ M(Bool, output_format_markdown_escape_special_characters, false, "Escape special characters in Markdown", 0) \ M(Bool, input_format_protobuf_flatten_google_wrappers, false, "Enable Google wrappers for regular non-nested columns, e.g. google.protobuf.StringValue 'str' for String column 'str'. For Nullable columns empty wrappers are recognized as defaults, and missing as nulls", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 20a8721c10e..fb59577b0f0 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -71,10 +71,12 @@ static std::initializer_list)" instead of simple "" if `is_auto==true`. + /// Writes "auto()" instead of simple "" if `is_auto == true`. String toString() const; void parseFromString(const String & str); diff --git a/src/Core/TypeId.h b/src/Core/TypeId.h index e4f850cbb59..1eba944e63e 100644 --- a/src/Core/TypeId.h +++ b/src/Core/TypeId.h @@ -45,6 +45,7 @@ enum class TypeIndex : uint8_t AggregateFunction, LowCardinality, Map, + ObjectDeprecated, Object, IPv4, IPv6, diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index e7ae8ea5a1d..c42bf7641d2 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -16,39 +16,29 @@ #include #if defined(OS_LINUX) - #include +#include #endif #include #include #include - #include #include #include -#include #include -#include #include #include #include #include #include - #include #include #include -#include #include -#include #include -#include #include -#include #include -#include -#include #include #include #include @@ -459,17 +449,9 @@ void BaseDaemon::initializeTerminationAndSignalProcessing() signal_listener_thread.start(*signal_listener); #if defined(__ELF__) && !defined(OS_FREEBSD) - String build_id_hex = SymbolIndex::instance().getBuildIDHex(); - if (build_id_hex.empty()) - build_id = ""; - else - build_id = build_id_hex; -#else - build_id = ""; + build_id = SymbolIndex::instance().getBuildIDHex(); #endif - git_hash = GIT_HASH; - #if defined(OS_LINUX) std::string executable_path = getExecutablePath(); @@ -482,7 +464,7 @@ void BaseDaemon::logRevision() const { logger().information("Starting " + std::string{VERSION_FULL} + " (revision: " + std::to_string(ClickHouseRevision::getVersionRevision()) - + ", git hash: " + (git_hash.empty() ? "" : git_hash) + + ", git hash: " + std::string(GIT_HASH) + ", build id: " + (build_id.empty() ? 
"" : build_id) + ")" + ", PID " + std::to_string(getpid())); } diff --git a/src/Daemon/BaseDaemon.h b/src/Daemon/BaseDaemon.h index b15aa74fcf3..a6efa94a567 100644 --- a/src/Daemon/BaseDaemon.h +++ b/src/Daemon/BaseDaemon.h @@ -165,7 +165,6 @@ protected: Poco::Util::AbstractConfiguration * last_configuration = nullptr; String build_id; - String git_hash; String stored_binary_hash; bool should_setup_watchdog = false; diff --git a/src/Daemon/CMakeLists.txt b/src/Daemon/CMakeLists.txt index 35ea2122dbb..2068af2200d 100644 --- a/src/Daemon/CMakeLists.txt +++ b/src/Daemon/CMakeLists.txt @@ -1,10 +1,7 @@ -configure_file(GitHash.cpp.in GitHash.generated.cpp) - add_library (daemon BaseDaemon.cpp GraphiteWriter.cpp SentryWriter.cpp - GitHash.generated.cpp ) target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_parsers clickhouse_common_io clickhouse_common_config) diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index 5f713e9adc9..c35f7526a18 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include namespace DB @@ -67,7 +68,11 @@ static DataTypePtr create(const ASTPtr & arguments) if (!argument || argument->name != "equals") throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Dynamic data type argument should be in a form 'max_types=N'"); - auto identifier_name = argument->arguments->children[0]->as()->name(); + const auto * identifier = argument->arguments->children[0]->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected Dynamic type argument: {}. Expected expression 'max_types=N'", identifier->formatForErrorMessage()); + + auto identifier_name = identifier->name(); if (identifier_name != "max_types") throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected identifier: {}. Dynamic data type argument should be in a form 'max_types=N'", identifier_name); @@ -84,9 +89,53 @@ void registerDataTypeDynamic(DataTypeFactory & factory) factory.registerDataType("Dynamic", create); } +namespace +{ + +/// Split Dynamic subcolumn name into 2 parts: type name and subcolumn of this type. +/// We cannot simply split by '.' because type name can also contain dots. For example: Tuple(`a.b` UInt32). +/// But in all such cases this '.' will be inside back quotes. To split subcolumn name correctly +/// we search for the first '.' that is not inside back quotes. +std::pair splitSubcolumnName(std::string_view subcolumn_name) +{ + bool inside_quotes = false; + const char * pos = subcolumn_name.data(); + const char * end = subcolumn_name.data() + subcolumn_name.size(); + while (true) + { + pos = find_first_symbols<'`', '.', '\\'>(pos, end); + if (pos == end) + break; + + if (*pos == '`') + { + inside_quotes = !inside_quotes; + ++pos; + } + else if (*pos == '\\') + { + ++pos; + } + else if (*pos == '.') + { + if (inside_quotes) + ++pos; + else + break; + } + } + + if (pos == end) + return {subcolumn_name, {}}; + + return {std::string_view(subcolumn_name.data(), pos), std::string_view(pos + 1, end)}; +} + +} + std::unique_ptr DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const { - auto [type_subcolumn_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); + auto [type_subcolumn_name, subcolumn_nested_name] = splitSubcolumnName(subcolumn_name); /// Check if requested subcolumn is a valid data type. 
auto subcolumn_type = DataTypeFactory::instance().tryGet(String(type_subcolumn_name)); if (!subcolumn_type) diff --git a/src/DataTypes/DataTypeDynamic.h b/src/DataTypes/DataTypeDynamic.h index d5e4c5261ce..2e7a23d314d 100644 --- a/src/DataTypes/DataTypeDynamic.h +++ b/src/DataTypes/DataTypeDynamic.h @@ -12,6 +12,9 @@ class DataTypeDynamic final : public IDataType public: static constexpr bool is_parametric = true; + /// Don't change this constant, it can break backward compatibility. + static constexpr size_t DEFAULT_MAX_DYNAMIC_TYPES = 32; + explicit DataTypeDynamic(size_t max_dynamic_types_ = DEFAULT_MAX_DYNAMIC_TYPES); TypeIndex getTypeId() const override { return TypeIndex::Dynamic; } @@ -43,8 +46,6 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } private: - static constexpr size_t DEFAULT_MAX_DYNAMIC_TYPES = 32; - SerializationPtr doGetDefaultSerialization() const override; String doGetName() const override; diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index ca2ebdfbdbb..107d2d48135 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -273,9 +273,10 @@ DataTypeFactory::DataTypeFactory() registerDataTypeDomainSimpleAggregateFunction(*this); registerDataTypeDomainGeo(*this); registerDataTypeMap(*this); - registerDataTypeObject(*this); + registerDataTypeObjectDeprecated(*this); registerDataTypeVariant(*this); registerDataTypeDynamic(*this); + registerDataTypeJSON(*this); } DataTypeFactory & DataTypeFactory::instance() diff --git a/src/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h index a8324341691..7234c53551c 100644 --- a/src/DataTypes/DataTypeFactory.h +++ b/src/DataTypes/DataTypeFactory.h @@ -99,8 +99,9 @@ void registerDataTypeLowCardinality(DataTypeFactory & factory); void registerDataTypeDomainBool(DataTypeFactory & factory); void registerDataTypeDomainSimpleAggregateFunction(DataTypeFactory & factory); void registerDataTypeDomainGeo(DataTypeFactory & factory); -void registerDataTypeObject(DataTypeFactory & factory); +void registerDataTypeObjectDeprecated(DataTypeFactory & factory); void registerDataTypeVariant(DataTypeFactory & factory); void registerDataTypeDynamic(DataTypeFactory & factory); +void registerDataTypeJSON(DataTypeFactory & factory); } diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 5fd69688194..a56764f4e6e 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -1,83 +1,525 @@ -#include #include -#include +#include +#include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include +#include +#include +#include #include +#include "config.h" + +#if USE_SIMDJSON +# include +#elif USE_RAPIDJSON +# include +#else +# include +#endif namespace DB { namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int UNEXPECTED_AST_STRUCTURE; + extern const int BAD_ARGUMENTS; + extern const int CANNOT_COMPILE_REGEXP; } -DataTypeObject::DataTypeObject(const String & schema_format_, bool is_nullable_) - : schema_format(Poco::toLower(schema_format_)) - , is_nullable(is_nullable_) +DataTypeObject::DataTypeObject( + const SchemaFormat & schema_format_, + std::unordered_map typed_paths_, + std::unordered_set paths_to_skip_, + std::vector path_regexps_to_skip_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_) + : schema_format(schema_format_) + , typed_paths(std::move(typed_paths_)) + , 
paths_to_skip(std::move(paths_to_skip_)) + , path_regexps_to_skip(std::move(path_regexps_to_skip_)) + , max_dynamic_paths(max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) +{ + /// Check if regular expressions are valid. + for (const auto & regexp_str : path_regexps_to_skip) + { + re2::RE2::Options options; + /// Don't log errors to stderr. + options.set_log_errors(false); + auto regexp = re2::RE2(regexp_str, options); + if (!regexp.ok()) + throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP, "Invalid regexp '{}': {}", regexp_str, regexp.error()); + } + + for (const auto & [typed_path, type] : typed_paths) + { + for (const auto & path_to_skip : paths_to_skip) + { + if (typed_path.starts_with(path_to_skip)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP path prefix '{}'", typed_path, type->getName(), path_to_skip); + } + + for (const auto & path_regex_to_skip : path_regexps_to_skip) + { + if (re2::RE2::FullMatch(typed_path, re2::RE2(path_regex_to_skip))) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path '{}' is specified with the data type ('{}') and matches the SKIP REGEXP '{}'", typed_path, type->getName(), path_regex_to_skip); + } + } +} + +DataTypeObject::DataTypeObject(const DB::DataTypeObject::SchemaFormat & schema_format_, size_t max_dynamic_paths_, size_t max_dynamic_types_) + : schema_format(schema_format_) + , max_dynamic_paths(max_dynamic_paths_) + , max_dynamic_types(max_dynamic_types_) { } bool DataTypeObject::equals(const IDataType & rhs) const { if (const auto * object = typeid_cast(&rhs)) - return schema_format == object->schema_format && is_nullable == object->is_nullable; + { + if (typed_paths.size() != object->typed_paths.size()) + return false; + + for (const auto & [path, type] : typed_paths) + { + auto it = object->typed_paths.find(path); + if (it == object->typed_paths.end()) + return false; + if (!type->equals(*it->second)) + return false; + } + + return schema_format == object->schema_format && paths_to_skip == object->paths_to_skip && path_regexps_to_skip == object->path_regexps_to_skip + && max_dynamic_types == object->max_dynamic_types && max_dynamic_paths == object->max_dynamic_paths; + } + return false; } SerializationPtr DataTypeObject::doGetDefaultSerialization() const { - return getObjectSerialization(schema_format); + std::unordered_map typed_path_serializations; + typed_path_serializations.reserve(typed_paths.size()); + for (const auto & [path, type] : typed_paths) + typed_path_serializations[path] = type->getDefaultSerialization(); + + switch (schema_format) + { + case SchemaFormat::JSON: +#if USE_SIMDJSON + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); +#elif USE_RAPIDJSON + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); +#else + return std::make_shared>( + std::move(typed_path_serializations), + paths_to_skip, + path_regexps_to_skip, + buildJSONExtractTree(getPtr(), "JSON serialization")); +#endif + } } String DataTypeObject::doGetName() const { WriteBufferFromOwnString out; - if (is_nullable) - out << "Object(Nullable(" << quote << schema_format << "))"; - else - out << "Object(" << quote << schema_format << ")"; + out << magic_enum::enum_name(schema_format); + bool first = true; + auto write_separator = [&]() + { + if (!first) + { + out << ", 
"; + } + else + { + out << "("; + first = false; + } + }; + + if (max_dynamic_types != DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) + { + write_separator(); + out << "max_dynamic_types=" << max_dynamic_types; + } + + if (max_dynamic_paths != DEFAULT_MAX_SEPARATELY_STORED_PATHS) + { + write_separator(); + out << "max_dynamic_paths=" << max_dynamic_paths; + } + + std::vector sorted_typed_paths; + sorted_typed_paths.reserve(typed_paths.size()); + for (const auto & [path, _] : typed_paths) + sorted_typed_paths.push_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + for (const auto & path : sorted_typed_paths) + { + write_separator(); + out << backQuoteIfNeed(path) << " " << typed_paths.at(path)->getName(); + } + + std::vector sorted_skip_paths; + sorted_skip_paths.reserve(paths_to_skip.size()); + for (const auto & skip_path : paths_to_skip) + sorted_skip_paths.push_back(skip_path); + std::sort(sorted_skip_paths.begin(), sorted_skip_paths.end()); + for (const auto & skip_path : sorted_skip_paths) + { + write_separator(); + out << "SKIP " << backQuoteIfNeed(skip_path); + } + + for (const auto & skip_regexp : path_regexps_to_skip) + { + write_separator(); + out << "SKIP REGEXP " << quoteString(skip_regexp); + } + + if (!first) + out << ")"; + return out.str(); } -static DataTypePtr create(const ASTPtr & arguments) +MutableColumnPtr DataTypeObject::createColumn() const { - if (!arguments || arguments->children.size() != 1) - throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Object data type family must have one argument - name of schema format"); + std::unordered_map typed_path_columns; + typed_path_columns.reserve(typed_paths.size()); + for (const auto & [path, type] : typed_paths) + typed_path_columns[path] = type->createColumn(); - ASTPtr schema_argument = arguments->children[0]; - bool is_nullable = false; + return ColumnObject::create(std::move(typed_path_columns), max_dynamic_paths, max_dynamic_types); +} - if (const auto * type = schema_argument->as()) +namespace +{ + +/// It is possible to have nested JSON object inside Dynamic. For example when we have an array of JSON objects. +/// During type inference in parsing in case of creating nested JSON objects, we reduce max_dynamic_paths/max_dynamic_types by factors +/// NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR/NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR. +/// So the type name will actually be JSON(max_dynamic_paths=N, max_dynamic_types=M). But we want the user to be able to query it +/// using json.array.:`Array(JSON)`.some.path without specifying max_dynamic_paths/max_dynamic_types. +/// To support it, we do a trick - we replace JSON name in subcolumn to JSON(max_dynamic_paths=N, max_dynamic_types=M), because we know +/// the exact values of max_dynamic_paths/max_dynamic_types for it. +void replaceJSONTypeNameIfNeeded(String & type_name, size_t max_dynamic_paths, size_t max_dynamic_types) +{ + auto pos = type_name.find("JSON"); + while (pos != String::npos) { - if (type->name != "Nullable" || type->arguments->children.size() != 1) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Expected 'Nullable()' as parameter for type Object (function: {})", type->name); + /// Replace only if we don't already have parameters in JSON type declaration. 
+ if (pos + 4 == type_name.size() || type_name[pos + 4] != '(') + type_name.replace( + pos, + 4, + fmt::format( + "JSON(max_dynamic_paths={}, max_dynamic_types={})", + max_dynamic_paths / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, + max_dynamic_types / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR)); + pos = type_name.find("JSON", pos + 4); + } +} - schema_argument = type->arguments->children[0]; - is_nullable = true; +/// JSON subcolumn name with Dynamic type subcolumn looks like this: +/// "json.some.path.:`Type_name`.some.subcolumn". +/// We back quoted type name during identifier parsing so we can distinguish type subcolumn and path element ":TypeName". +std::pair splitPathAndDynamicTypeSubcolumn(std::string_view subcolumn_name, size_t max_dynamic_paths, size_t max_dynamic_types) +{ + /// Try to find dynamic type subcolumn in a form .:`Type`. + auto pos = subcolumn_name.find(".:`"); + if (pos == std::string_view::npos) + return {String(subcolumn_name), ""}; + + ReadBufferFromMemory buf(subcolumn_name.substr(pos + 2)); + String dynamic_subcolumn; + /// Try to read back quoted type name. + if (!tryReadBackQuotedString(dynamic_subcolumn, buf)) + return {String(subcolumn_name), ""}; + + replaceJSONTypeNameIfNeeded(dynamic_subcolumn, max_dynamic_paths, max_dynamic_types); + + /// If there is more data in the buffer - it's subcolumn of a type, append it to the type name. + if (!buf.eof()) + dynamic_subcolumn += String(buf.position(), buf.available()); + + return {String(subcolumn_name.substr(0, pos)), dynamic_subcolumn}; +} + +/// Sub-object subcolumn in JSON path always looks like "^`some`.path.path". +/// We back quote first path element after `^` so we can distinguish sub-object subcolumn and path element "^path". +std::optional tryGetSubObjectSubcolumn(std::string_view subcolumn_name) +{ + if (!subcolumn_name.starts_with("^`")) + return std::nullopt; + + ReadBufferFromMemory buf(subcolumn_name.data() + 1); + String path; + /// Try to read back-quoted first path element. + if (!tryReadBackQuotedString(path, buf)) + return std::nullopt; + + /// Add remaining path elements if any. + return path + String(buf.position(), buf.available()); +} + +/// Return sub-path by specified prefix. +/// For example, for prefix a.b: +/// a.b.c.d -> c.d, a.b.c -> c +String getSubPath(const String & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +std::string_view getSubPath(std::string_view path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +} + +std::unique_ptr DataTypeObject::getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const +{ + /// Check if it's sub-object subcolumn. + /// In this case we should return JSON column with all paths that are inside specified object prefix. + /// For example, if we have {"a" : {"b" : {"c" : {"d" : 10, "e" : "Hello"}, "f" : [1, 2, 3]}}} and subcolumn ^a.b + /// we should return JSON column with data {"c" : {"d" : 10, "e" : Hello}, "f" : [1, 2, 3]} + if (auto sub_object_subcolumn = tryGetSubObjectSubcolumn(subcolumn_name)) + { + const String & prefix = *sub_object_subcolumn; + /// Collect new typed paths. + std::unordered_map typed_sub_paths; + /// Collect serializations for typed paths. They will be needed for sub-object subcolumn deserialization. 
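+ /// Note: unlike typed_sub_paths, which is keyed by the path with the requested prefix stripped,
+ /// this map keeps the full original paths, because the serialization constructed below is created
+ /// from the prefix together with these full-path serializations.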
+ std::unordered_map typed_paths_serializations; + for (const auto & [path, type] : typed_paths) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + { + typed_sub_paths[getSubPath(path, prefix)] = type; + typed_paths_serializations[path] = type->getDefaultSerialization(); + } + } + + std::unique_ptr res = std::make_unique(std::make_shared(prefix, typed_paths_serializations)); + /// Keep all current constraints like limits and skip paths/prefixes/regexps. + res->type = std::make_shared(schema_format, typed_sub_paths, paths_to_skip, path_regexps_to_skip, max_dynamic_paths, max_dynamic_types); + /// If column was provided, we should create a column for the requested subcolumn. + if (data.column) + { + const auto & object_column = assert_cast(*data.column); + + auto result_column = res->type->createColumn(); + auto & result_object_column = assert_cast(*result_column); + + /// Iterate over all typed/dynamic/shared data paths and collect all paths with specified prefix. + auto & result_typed_columns = result_object_column.getTypedPaths(); + for (const auto & [path, column] : object_column.getTypedPaths()) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + result_typed_columns[getSubPath(path, prefix)] = column; + } + + auto & result_dynamic_columns = result_object_column.getDynamicPaths(); + auto & result_dynamic_columns_ptrs = result_object_column.getDynamicPathsPtrs(); + for (const auto & [path, column] : object_column.getDynamicPaths()) + { + if (path.starts_with(prefix) && path.size() != prefix.size()) + { + auto sub_path = getSubPath(path, prefix); + result_dynamic_columns[sub_path] = column; + result_dynamic_columns_ptrs[sub_path] = assert_cast(result_dynamic_columns[sub_path].get()); + } + } + + const auto & shared_data_offsets = object_column.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = object_column.getSharedDataPathsAndValues(); + auto & result_shared_data_offsets = result_object_column.getSharedDataOffsets(); + result_shared_data_offsets.reserve(shared_data_offsets.size()); + auto [result_shared_data_paths, result_shared_data_values] = result_object_column.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + size_t lower_bound_index = ColumnObject::findPathLowerBoundInSharedData(prefix, *shared_data_paths, start, end); + for (; lower_bound_index != end; ++lower_bound_index) + { + auto path = shared_data_paths->getDataAt(lower_bound_index).toView(); + if (!path.starts_with(prefix)) + break; + + /// Don't include path that is equal to the prefix. + if (path.size() != prefix.size()) + { + auto sub_path = getSubPath(path, prefix); + result_shared_data_paths->insertData(sub_path.data(), sub_path.size()); + result_shared_data_values->insertFrom(*shared_data_values, lower_bound_index); + } + } + result_shared_data_offsets.push_back(result_shared_data_paths->size()); + } + + res->column = std::move(result_column); + } + + return res; } - const auto * literal = schema_argument->as(); - if (!literal || literal->value.getType() != Field::Types::String) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, - "Object data type family must have a const string as its schema name parameter"); + /// Split requested subcolumn to the JSON path and Dynamic type subcolumn. 
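+ /// For example, "a.b.:`Int64`" is split into the path "a.b" and the Dynamic subcolumn "Int64",
+ /// while "a.b" without a type hint yields the path "a.b" and an empty Dynamic subcolumn
+ /// (illustrative examples of the format described above).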
+ auto [path, path_subcolumn] = splitPathAndDynamicTypeSubcolumn(subcolumn_name, max_dynamic_paths, max_dynamic_types); + std::unique_ptr res; + if (auto it = typed_paths.find(path); it != typed_paths.end()) + { + res = std::make_unique(it->second->getDefaultSerialization()); + res->type = it->second; + } + else + { + res = std::make_unique(std::make_shared()); + res->type = std::make_shared(); + } - return std::make_shared(literal->value.safeGet(), is_nullable); + /// If column was provided, we should create a column for requested subcolumn. + if (data.column) + { + const auto & object_column = assert_cast(*data.column); + /// Try to find requested path in typed paths. + if (auto typed_it = object_column.getTypedPaths().find(path); typed_it != object_column.getTypedPaths().end()) + { + res->column = typed_it->second; + } + /// Try to find requested path in dynamic paths. + else if (auto dynamic_it = object_column.getDynamicPaths().find(path); dynamic_it != object_column.getDynamicPaths().end()) + { + res->column = dynamic_it->second; + } + /// Extract values of requested path from shared data. + else + { + auto dynamic_column = ColumnDynamic::create(max_dynamic_types); + dynamic_column->reserve(object_column.size()); + ColumnObject::fillPathColumnFromSharedData(*dynamic_column, path, object_column.getSharedDataPtr(), 0, object_column.size()); + res->column = std::move(dynamic_column); + } + } + + /// Get subcolumn for Dynamic type if needed. + if (!path_subcolumn.empty()) + { + res = res->type->getSubcolumnData(path_subcolumn, *res, throw_if_null); + if (!res) + return nullptr; + } + + if (typed_paths.contains(path)) + res->serialization = std::make_shared(res->serialization, path); + else + res->serialization = std::make_shared(res->serialization, path, path_subcolumn, max_dynamic_types); + + return res; } -void registerDataTypeObject(DataTypeFactory & factory) +static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject::SchemaFormat & schema_format) { - factory.registerDataType("Object", create); - factory.registerSimpleDataType("JSON", - [] { return std::make_shared("JSON", false); }, - DataTypeFactory::Case::Insensitive); + if (!arguments || arguments->children.empty()) + return std::make_shared(schema_format); + + std::unordered_map typed_paths; + std::unordered_set paths_to_skip; + std::vector path_regexps_to_skip; + + size_t max_dynamic_types = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES; + size_t max_dynamic_paths = DataTypeObject::DEFAULT_MAX_SEPARATELY_STORED_PATHS; + + for (const auto & argument : arguments->children) + { + const auto * object_type_argument = argument->as(); + if (object_type_argument->parameter) + { + const auto * function = object_type_argument->parameter->as(); + + if (!function || function->name != "equals") + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected parameter in {} type arguments: {}", magic_enum::enum_name(schema_format), function->formatForErrorMessage()); + + const auto * identifier = function->arguments->children[0]->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected {} type argument: {}. Expected expression 'max_dynamic_types=N' or 'max_dynamic_paths=N'", magic_enum::enum_name(schema_format), function->formatForErrorMessage()); + + auto identifier_name = identifier->name(); + if (identifier_name != "max_dynamic_types" && identifier_name != "max_dynamic_paths") + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected parameter in {} type arguments: {}. 
Expected 'max_dynamic_types' or `max_dynamic_paths`", magic_enum::enum_name(schema_format), identifier_name); + + auto * literal = function->arguments->children[1]->as(); + /// Is 1000000 a good maximum for max paths? + size_t max_value = identifier_name == "max_dynamic_types" ? ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT : 1000000; + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet() > max_value) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'{}' parameter for {} type should be a positive integer between 0 and {}. Got {}", identifier_name, magic_enum::enum_name(schema_format), max_value, function->arguments->children[1]->formatForErrorMessage()); + + if (identifier_name == "max_dynamic_types") + max_dynamic_types = literal->value.safeGet(); + else + max_dynamic_paths = literal->value.safeGet(); + } + else if (object_type_argument->path_with_type) + { + const auto * path_with_type = object_type_argument->path_with_type->as(); + auto data_type = DataTypeFactory::instance().get(path_with_type->type); + if (typed_paths.contains(path_with_type->name)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Found duplicated path with type: {}", path_with_type->name); + typed_paths.emplace(path_with_type->name, data_type); + } + else if (object_type_argument->skip_path) + { + const auto * identifier = object_type_argument->skip_path->as(); + if (!identifier) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); + + paths_to_skip.insert(identifier->name()); + } + else if (object_type_argument->skip_path_regexp) + { + const auto * literal = object_type_argument->skip_path_regexp->as(); + if (!literal || literal->value.getType() != Field::Types::String) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected AST in SKIP section of {} type arguments: {}. Expected identifier with path name", magic_enum::enum_name(schema_format), object_type_argument->skip_path->formatForErrorMessage()); + + path_regexps_to_skip.push_back(literal->value.safeGet()); + } + } + + std::sort(path_regexps_to_skip.begin(), path_regexps_to_skip.end()); + return std::make_shared(schema_format, std::move(typed_paths), std::move(paths_to_skip), std::move(path_regexps_to_skip), max_dynamic_paths, max_dynamic_types); +} + +static DataTypePtr createJSON(const ASTPtr & arguments) +{ + return createObject(arguments, DataTypeObject::SchemaFormat::JSON); +} + +void registerDataTypeJSON(DataTypeFactory & factory) +{ + if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) + factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive); } } diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index c610a1a8ba4..7eb2e7729de 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -1,48 +1,80 @@ #pragma once #include +#include #include -#include +#include +#include namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - class DataTypeObject : public IDataType { -private: - String schema_format; - bool is_nullable; - public: - DataTypeObject(const String & schema_format_, bool is_nullable_); + enum class SchemaFormat + { + JSON = 0, + }; + + /// Don't change these constants, it can break backward compatibility. 
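+ /// DEFAULT_MAX_SEPARATELY_STORED_PATHS is used when max_dynamic_paths is not given in the declaration,
+ /// e.g. plain JSON or JSON(a.b UInt32, SKIP a.c) keeps max_dynamic_paths = 1024, while max_dynamic_types
+ /// falls back to DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES (illustrative declarations; see createObject() in DataTypeObject.cpp).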
+ static constexpr size_t DEFAULT_MAX_SEPARATELY_STORED_PATHS = 1024; + static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR = 4; + static constexpr size_t NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR = 2; + + explicit DataTypeObject( + const SchemaFormat & schema_format_, + std::unordered_map typed_paths_ = {}, + std::unordered_set paths_to_skip_ = {}, + std::vector path_regexps_to_skip_ = {}, + size_t max_dynamic_paths_ = DEFAULT_MAX_SEPARATELY_STORED_PATHS, + size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES); + + DataTypeObject(const SchemaFormat & schema_format_, size_t max_dynamic_paths_, size_t max_dynamic_types_); const char * getFamilyName() const override { return "Object"; } String doGetName() const override; TypeIndex getTypeId() const override { return TypeIndex::Object; } - MutableColumnPtr createColumn() const override { return ColumnObject::create(is_nullable); } + MutableColumnPtr createColumn() const override; - Field getDefault() const override - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDefault() is not implemented for data type {}", getName()); - } + Field getDefault() const override { return Object(); } - bool haveSubtypes() const override { return false; } - bool equals(const IDataType & rhs) const override; bool isParametric() const override { return true; } - bool hasDynamicSubcolumnsDeprecated() const override { return true; } + bool canBeInsideNullable() const override { return false; } + bool supportsSparseSerialization() const override { return false; } + bool canBeInsideSparseColumns() const override { return false; } + bool isComparable() const override { return false; } + bool haveSubtypes() const override { return false; } + + bool equals(const IDataType & rhs) const override; + + bool hasDynamicSubcolumnsData() const override { return true; } + std::unique_ptr getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const override; SerializationPtr doGetDefaultSerialization() const override; - bool hasNullableSubcolumns() const { return is_nullable; } + const SchemaFormat & getSchemaFormat() const { return schema_format; } + const std::unordered_map & getTypedPaths() const { return typed_paths; } + const std::unordered_set & getPathsToSkip() const { return paths_to_skip; } + const std::vector & getPathRegexpsToSkip() const { return path_regexps_to_skip; } - const String & getSchemaFormat() const { return schema_format; } + size_t getMaxDynamicTypes() const { return max_dynamic_types; } + size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + +private: + SchemaFormat schema_format; + /// Set of paths with types that were specified in type declaration. + std::unordered_map typed_paths; + /// Set of paths that should be skipped during data parsing. + std::unordered_set paths_to_skip; + /// List of regular expressions that should be used to skip paths during data parsing. + std::vector path_regexps_to_skip; + /// Limit on the number of paths that can be stored as subcolumn. + size_t max_dynamic_paths; + /// Limit of dynamic types that should be used for Dynamic columns. 
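+ /// It is forwarded to ColumnObject::create() in createColumn() and to the ColumnDynamic columns
+ /// created for paths read from shared data (see getDynamicSubcolumnData() in DataTypeObject.cpp).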
+ size_t max_dynamic_types; }; } diff --git a/src/DataTypes/DataTypeObjectDeprecated.cpp b/src/DataTypes/DataTypeObjectDeprecated.cpp new file mode 100644 index 00000000000..07f9c116e58 --- /dev/null +++ b/src/DataTypes/DataTypeObjectDeprecated.cpp @@ -0,0 +1,87 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int UNEXPECTED_AST_STRUCTURE; +} + +DataTypeObjectDeprecated::DataTypeObjectDeprecated(const String & schema_format_, bool is_nullable_) + : schema_format(Poco::toLower(schema_format_)) + , is_nullable(is_nullable_) +{ +} + +bool DataTypeObjectDeprecated::equals(const IDataType & rhs) const +{ + if (const auto * object = typeid_cast(&rhs)) + return schema_format == object->schema_format && is_nullable == object->is_nullable; + return false; +} + +SerializationPtr DataTypeObjectDeprecated::doGetDefaultSerialization() const +{ + return getObjectSerialization(schema_format); +} + +String DataTypeObjectDeprecated::doGetName() const +{ + WriteBufferFromOwnString out; + if (is_nullable) + out << "Object(Nullable(" << quote << schema_format << "))"; + else + out << "Object(" << quote << schema_format << ")"; + return out.str(); +} + +static DataTypePtr create(const ASTPtr & arguments) +{ + if (!arguments || arguments->children.size() != 1) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Object data type family must have one argument - name of schema format"); + + ASTPtr schema_argument = arguments->children[0]; + bool is_nullable = false; + + if (const auto * type = schema_argument->as()) + { + if (type->name != "Nullable" || type->arguments->children.size() != 1) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, + "Expected 'Nullable()' as parameter for type Object (function: {})", type->name); + + schema_argument = type->arguments->children[0]; + is_nullable = true; + } + + const auto * literal = schema_argument->as(); + if (!literal || literal->value.getType() != Field::Types::String) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, + "Object data type family must have a const string as its schema name parameter"); + + return std::make_shared(literal->value.safeGet(), is_nullable); +} + +void registerDataTypeObjectDeprecated(DataTypeFactory & factory) +{ + factory.registerDataType("Object", create); + if (Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type) + factory.registerSimpleDataType("JSON", + [] { return std::make_shared("JSON", false); }, + DataTypeFactory::Case::Insensitive); +} + +} diff --git a/src/DataTypes/DataTypeObjectDeprecated.h b/src/DataTypes/DataTypeObjectDeprecated.h new file mode 100644 index 00000000000..e1f81caaa4f --- /dev/null +++ b/src/DataTypes/DataTypeObjectDeprecated.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +class DataTypeObjectDeprecated : public IDataType +{ +private: + String schema_format; + bool is_nullable; + +public: + DataTypeObjectDeprecated(const String & schema_format_, bool is_nullable_); + + const char * getFamilyName() const override { return "Object"; } + String doGetName() const override; + TypeIndex getTypeId() const override { return TypeIndex::ObjectDeprecated; } + + MutableColumnPtr createColumn() const override { return ColumnObjectDeprecated::create(is_nullable); } + + Field getDefault() 
const override + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method getDefault() is not implemented for data type {}", getName()); + } + + bool haveSubtypes() const override { return false; } + bool equals(const IDataType & rhs) const override; + bool isParametric() const override { return true; } + bool hasDynamicSubcolumnsDeprecated() const override { return true; } + + SerializationPtr doGetDefaultSerialization() const override; + + bool hasNullableSubcolumns() const { return is_nullable; } + + const String & getSchemaFormat() const { return schema_format; } +}; + +} diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index 67b4a0a5e31..75556ed4090 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index 610f246265e..dc0f2f3f5aa 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -94,8 +95,13 @@ enum class BinaryTypeIndex : uint8_t Bool = 0x2D, SimpleAggregateFunction = 0x2E, Nested = 0x2F, + JSON = 0x30, }; +/// In future we can introduce more arguments in the JSON data type definition. +/// To support such changes, use versioning in the serialization of JSON type. +const UInt8 TYPE_JSON_SERIALIZATION_VERSION = 0; + BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) { /// By default custom types don't have their own BinaryTypeIndex. @@ -202,7 +208,7 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) return BinaryTypeIndex::LowCardinality; case TypeIndex::Map: return BinaryTypeIndex::Map; - case TypeIndex::Object: + case TypeIndex::ObjectDeprecated: /// Object type will be deprecated and replaced by new implementation. No need to support it here. throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type Object is not supported"); case TypeIndex::IPv4: @@ -216,6 +222,15 @@ BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type) /// JSONPaths is used only during schema inference and cannot be used anywhere else. case TypeIndex::JSONPaths: throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type JSONPaths is not supported"); + case TypeIndex::Object: + { + const auto & object_type = assert_cast(*type); + switch (object_type.getSchemaFormat()) + { + case DataTypeObject::SchemaFormat::JSON: + return BinaryTypeIndex::JSON; + } + } } } @@ -480,6 +495,30 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf) writeStringBinary(type_name, buf); break; } + case BinaryTypeIndex::JSON: + { + const auto & object_type = assert_cast(*type); + /// Write version of the serialization because we can add new arguments in the JSON type. 
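+ /// Resulting layout for version 0, mirrored by the reads in decodeDataType() below:
+ /// version (UInt8), max_dynamic_paths (VarUInt), max_dynamic_types (UInt8),
+ /// the number of typed paths followed by (path, encoded type) pairs,
+ /// the number of paths to skip followed by the paths,
+ /// the number of path regexps to skip followed by the regexps.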
+ writeBinary(TYPE_JSON_SERIALIZATION_VERSION, buf); + writeVarUInt(object_type.getMaxDynamicPaths(), buf); + writeBinary(UInt8(object_type.getMaxDynamicTypes()), buf); + const auto & typed_paths = object_type.getTypedPaths(); + writeVarUInt(typed_paths.size(), buf); + for (const auto & [path, path_type] : typed_paths) + { + writeStringBinary(path, buf); + encodeDataType(path_type, buf); + } + const auto & paths_to_skip = object_type.getPathsToSkip(); + writeVarUInt(paths_to_skip.size(), buf); + for (const auto & path : paths_to_skip) + writeStringBinary(path, buf); + const auto & path_regexps_to_skip = object_type.getPathRegexpsToSkip(); + writeVarUInt(path_regexps_to_skip.size(), buf); + for (const auto & regexp : path_regexps_to_skip) + writeStringBinary(regexp, buf); + break; + } default: break; } @@ -691,6 +730,54 @@ DataTypePtr decodeDataType(ReadBuffer & buf) readStringBinary(type_name, buf); return DataTypeFactory::instance().get(type_name); } + case BinaryTypeIndex::JSON: + { + UInt8 serialization_version; + readBinary(serialization_version, buf); + if (serialization_version > TYPE_JSON_SERIALIZATION_VERSION) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected version of JSON type binary encoding"); + size_t max_dynamic_paths; + readVarUInt(max_dynamic_paths, buf); + UInt8 max_dynamic_types; + readBinary(max_dynamic_types, buf); + size_t typed_paths_size; + readVarUInt(typed_paths_size, buf); + std::unordered_map typed_paths; + for (size_t i = 0; i != typed_paths_size; ++i) + { + String path; + readStringBinary(path, buf); + typed_paths[path] = decodeDataType(buf); + } + size_t paths_to_skip_size; + readVarUInt(paths_to_skip_size, buf); + std::unordered_set paths_to_skip; + paths_to_skip.reserve(paths_to_skip_size); + for (size_t i = 0; i != paths_to_skip_size; ++i) + { + String path; + readStringBinary(path, buf); + paths_to_skip.insert(path); + } + + size_t path_regexps_to_skip_size; + readVarUInt(path_regexps_to_skip_size, buf); + std::vector path_regexps_to_skip; + path_regexps_to_skip.reserve(path_regexps_to_skip_size); + for (size_t i = 0; i != path_regexps_to_skip_size; ++i) + { + String regexp; + readStringBinary(regexp, buf); + path_regexps_to_skip.push_back(regexp); + } + return std::make_shared( + DataTypeObject::SchemaFormat::JSON, + typed_paths, + paths_to_skip, + path_regexps_to_skip, + max_dynamic_paths, + max_dynamic_types); + } } throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown type code: {0:#04x}", UInt64(type)); diff --git a/src/DataTypes/DataTypesBinaryEncoding.h b/src/DataTypes/DataTypesBinaryEncoding.h index d02e7f85942..cdfbfee1ccf 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.h +++ b/src/DataTypes/DataTypesBinaryEncoding.h @@ -8,58 +8,59 @@ namespace DB /** Binary encoding for ClickHouse data types: -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ClickHouse data type | Binary encoding | -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Nothing | 0x00 | -| UInt8 | 0x01 | -| UInt16 | 0x02 | -| UInt32 | 0x03 | -| UInt64 | 0x04 | -| UInt128 | 0x05 | 
-| UInt256 | 0x06 | -| Int8 | 0x07 | -| Int16 | 0x08 | -| Int32 | 0x09 | -| Int64 | 0x0A | -| Int128 | 0x0B | -| Int256 | 0x0C | -| Float32 | 0x0D | -| Float64 | 0x0E | -| Date | 0x0F | -| Date32 | 0x10 | -| DateTime | 0x11 | -| DateTime(time_zone) | 0x12 | -| DateTime64(P) | 0x13 | -| DateTime64(P, time_zone) | 0x14 | -| String | 0x15 | -| FixedString(N) | 0x16 | -| Enum8 | 0x17... | -| Enum16 | 0x18...> | -| Decimal32(P, S) | 0x19 | -| Decimal64(P, S) | 0x1A | -| Decimal128(P, S) | 0x1B | -| Decimal256(P, S) | 0x1C | -| UUID | 0x1D | -| Array(T) | 0x1E | -| Tuple(T1, ..., TN) | 0x1F... | -| Tuple(name1 T1, ..., nameN TN) | 0x20... | -| Set | 0x21 | -| Interval | 0x22 | -| Nullable(T) | 0x23 | -| Function | 0x24... | -| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | -| LowCardinality(T) | 0x26 | -| Map(K, V) | 0x27 | -| IPv4 | 0x28 | -| IPv6 | 0x29 | -| Variant(T1, ..., TN) | 0x2A... | -| Dynamic(max_types=N) | 0x2B | -| Custom type (Ring, Polygon, etc) | 0x2C | -| Bool | 0x2D | -| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | -| Nested(name1 T1, ..., nameN TN) | 0x2F... | -|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ClickHouse data type | Binary encoding | +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Nothing | 0x00 | +| UInt8 | 0x01 | +| UInt16 | 0x02 | +| UInt32 | 0x03 | +| UInt64 | 0x04 | +| UInt128 | 0x05 | +| UInt256 | 0x06 | +| Int8 | 0x07 | +| Int16 | 0x08 | +| Int32 | 0x09 | +| Int64 | 0x0A | +| Int128 | 0x0B | +| Int256 | 0x0C | +| Float32 | 0x0D | +| Float64 | 0x0E | +| Date | 0x0F | +| Date32 | 0x10 | +| DateTime | 0x11 | +| DateTime(time_zone) | 0x12 | +| DateTime64(P) | 0x13 | +| DateTime64(P, time_zone) | 0x14 | +| String | 0x15 | +| FixedString(N) | 0x16 | +| Enum8 | 0x17... | +| Enum16 | 0x18...> | +| Decimal32(P, S) | 0x19 | +| Decimal64(P, S) | 0x1A | +| Decimal128(P, S) | 0x1B | +| Decimal256(P, S) | 0x1C | +| UUID | 0x1D | +| Array(T) | 0x1E | +| Tuple(T1, ..., TN) | 0x1F... | +| Tuple(name1 T1, ..., nameN TN) | 0x20... | +| Set| 0x21 | +| Interval | 0x22 | +| Nullable(T) | 0x23 | +| Function | 0x24... | +| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25...... | +| LowCardinality(T) | 0x26 | +| Map(K, V) | 0x27 | +| IPv4 | 0x28 | +| IPv6 | 0x29 | +| Variant(T1, ..., TN) | 0x2A... 
| +| Dynamic(max_types=N) | 0x2B | +| Custom type (Ring, Polygon, etc) | 0x2C | +| Bool | 0x2D | +| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E...... | +| Nested(name1 T1, ..., nameN TN) | 0x2F... | +| JSON(max_dynamic_paths=N, max_dynamic_types=M, path Type, SKIP skip_path, SKIP REGEXP skip_path_regexp) | 0x30......... | +|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| Interval kind binary encoding: |---------------|-----------------| diff --git a/src/DataTypes/FieldToDataType.cpp b/src/DataTypes/FieldToDataType.cpp index 03874279a0b..536d2656021 100644 --- a/src/DataTypes/FieldToDataType.cpp +++ b/src/DataTypes/FieldToDataType.cpp @@ -178,8 +178,7 @@ DataTypePtr FieldToDataType::operator() (const Map & map) const template DataTypePtr FieldToDataType::operator() (const Object &) const { - /// TODO: Do we need different parameters for type Object? - return std::make_shared("json", false); + return std::make_shared(DataTypeObject::SchemaFormat::JSON); } template diff --git a/src/DataTypes/IDataType.cpp b/src/DataTypes/IDataType.cpp index 824bc6e33b0..49e5b2d022e 100644 --- a/src/DataTypes/IDataType.cpp +++ b/src/DataTypes/IDataType.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -363,9 +362,10 @@ bool isArray(TYPE data_type) { return WhichDataType(data_type).isArray(); } \ bool isTuple(TYPE data_type) { return WhichDataType(data_type).isTuple(); } \ bool isMap(TYPE data_type) {return WhichDataType(data_type).isMap(); } \ bool isInterval(TYPE data_type) {return WhichDataType(data_type).isInterval(); } \ -bool isObject(TYPE data_type) { return WhichDataType(data_type).isObject(); } \ +bool isObjectDeprecated(TYPE data_type) { return WhichDataType(data_type).isObjectDeprecated(); } \ bool isVariant(TYPE data_type) { return WhichDataType(data_type).isVariant(); } \ bool isDynamic(TYPE data_type) { return WhichDataType(data_type).isDynamic(); } \ +bool isObject(TYPE data_type) { return WhichDataType(data_type).isObject(); } \ bool isNothing(TYPE data_type) { return WhichDataType(data_type).isNothing(); } \ \ bool isColumnedAsNumber(TYPE data_type) \ diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index 397ae3d8be9..a7665e610ab 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -432,7 +432,7 @@ struct WhichDataType constexpr bool isMap() const {return idx == TypeIndex::Map; } constexpr bool isSet() const { return idx == TypeIndex::Set; } constexpr bool isInterval() const { return idx == TypeIndex::Interval; } - constexpr bool isObject() const { return idx == TypeIndex::Object; } + constexpr bool isObjectDeprecated() const { return idx == TypeIndex::ObjectDeprecated; } constexpr bool isNothing() const { return idx == TypeIndex::Nothing; } constexpr bool isNullable() const { return idx == TypeIndex::Nullable; } @@ -444,6 +444,7 @@ struct WhichDataType constexpr bool isVariant() const { return idx == TypeIndex::Variant; } constexpr bool isDynamic() const { return idx == TypeIndex::Dynamic; } + constexpr bool isObject() const { return idx == TypeIndex::Object; } }; /// IDataType 
helpers (alternative for IDataType virtual methods with single point of truth) @@ -502,9 +503,10 @@ bool isArray(TYPE data_type); \ bool isTuple(TYPE data_type); \ bool isMap(TYPE data_type); \ bool isInterval(TYPE data_type); \ -bool isObject(TYPE data_type); \ +bool isObjectDeprecated(TYPE data_type); \ bool isVariant(TYPE data_type); \ bool isDynamic(TYPE data_type); \ +bool isObject(TYPE data_type); \ bool isNothing(TYPE data_type); \ \ bool isColumnedAsNumber(TYPE data_type); \ diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index fe8a5ede102..fb64199a1b0 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -180,12 +180,12 @@ static DataTypePtr recreateTupleWithElements(const DataTypeTuple & type_tuple, c } static std::pair convertObjectColumnToTuple( - const ColumnObject & column_object, const DataTypeObject & type_object) + const ColumnObjectDeprecated & column_object, const DataTypeObjectDeprecated & type_object) { if (!column_object.isFinalized()) { auto finalized = column_object.cloneFinalized(); - const auto & finalized_object = assert_cast(*finalized); + const auto & finalized_object = assert_cast(*finalized); return convertObjectColumnToTuple(finalized_object, type_object); } @@ -211,9 +211,9 @@ static std::pair recursivlyConvertDynamicColumnToTuple( if (!type->hasDynamicSubcolumnsDeprecated()) return {column, type}; - if (const auto * type_object = typeid_cast(type.get())) + if (const auto * type_object = typeid_cast(type.get())) { - const auto & column_object = assert_cast(*column); + const auto & column_object = assert_cast(*column); return convertObjectColumnToTuple(column_object, *type_object); } @@ -369,7 +369,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che for (const auto & [key, subtypes] : subcolumns_types) { assert(!subtypes.empty()); - if (key.getPath() == ColumnObject::COLUMN_NAME_DUMMY) + if (key.getPath() == ColumnObjectDeprecated::COLUMN_NAME_DUMMY) continue; size_t first_dim = getNumberOfDimensions(*subtypes[0]); @@ -385,7 +385,7 @@ static DataTypePtr getLeastCommonTypeForObject(const DataTypes & types, bool che if (tuple_paths.empty()) { - tuple_paths.emplace_back(ColumnObject::COLUMN_NAME_DUMMY); + tuple_paths.emplace_back(ColumnObjectDeprecated::COLUMN_NAME_DUMMY); tuple_types.emplace_back(std::make_shared()); } @@ -452,7 +452,7 @@ static DataTypePtr getLeastCommonTypeForDynamicColumnsImpl( if (!type_in_storage->hasDynamicSubcolumnsDeprecated()) return type_in_storage; - if (isObject(type_in_storage)) + if (isObjectDeprecated(type_in_storage)) return getLeastCommonTypeForObject(concrete_types, check_ambiguos_paths); if (const auto * type_array = typeid_cast(type_in_storage.get())) @@ -494,9 +494,9 @@ DataTypePtr createConcreteEmptyDynamicColumn(const DataTypePtr & type_in_storage if (!type_in_storage->hasDynamicSubcolumnsDeprecated()) return type_in_storage; - if (isObject(type_in_storage)) + if (isObjectDeprecated(type_in_storage)) return std::make_shared( - DataTypes{std::make_shared()}, Names{ColumnObject::COLUMN_NAME_DUMMY}); + DataTypes{std::make_shared()}, Names{ColumnObjectDeprecated::COLUMN_NAME_DUMMY}); if (const auto * type_array = typeid_cast(type_in_storage.get())) return std::make_shared( @@ -838,7 +838,7 @@ DataTypePtr unflattenTuple(const PathsInData & paths, const 
DataTypes & tuple_ty return unflattenTuple(paths, tuple_types, tuple_columns).second; } -std::pair unflattenObjectToTuple(const ColumnObject & column) +std::pair unflattenObjectToTuple(const ColumnObjectDeprecated & column) { const auto & subcolumns = column.getSubcolumns(); @@ -846,7 +846,7 @@ std::pair unflattenObjectToTuple(const ColumnObject & co { auto type = std::make_shared( DataTypes{std::make_shared()}, - Names{ColumnObject::COLUMN_NAME_DUMMY}); + Names{ColumnObjectDeprecated::COLUMN_NAME_DUMMY}); return {type->createColumn()->cloneResized(column.size()), type}; } diff --git a/src/DataTypes/ObjectUtils.h b/src/DataTypes/ObjectUtils.h index 21e5c3b2f59..d4109b971a4 100644 --- a/src/DataTypes/ObjectUtils.h +++ b/src/DataTypes/ObjectUtils.h @@ -6,7 +6,7 @@ #include #include #include -#include +#include namespace DB { @@ -88,7 +88,7 @@ DataTypePtr unflattenTuple( const PathsInData & paths, const DataTypes & tuple_types); -std::pair unflattenObjectToTuple(const ColumnObject & column); +std::pair unflattenObjectToTuple(const ColumnObjectDeprecated & column); std::pair unflattenTuple( const PathsInData & paths, diff --git a/src/DataTypes/Serializations/ISerialization.cpp b/src/DataTypes/Serializations/ISerialization.cpp index 7642a6619b3..338edc3a144 100644 --- a/src/DataTypes/Serializations/ISerialization.cpp +++ b/src/DataTypes/Serializations/ISerialization.cpp @@ -202,6 +202,12 @@ String getNameForSubstreamPath( stream_name += "." + it->variant_element_name + ".null"; else if (it->type == SubstreamType::DynamicStructure) stream_name += ".dynamic_structure"; + else if (it->type == SubstreamType::ObjectStructure) + stream_name += ".object_structure"; + else if (it->type == SubstreamType::ObjectSharedData) + stream_name += ".object_shared_data"; + else if (it->type == SubstreamType::ObjectTypedPath || it->type == SubstreamType::ObjectDynamicPath) + stream_name += "." + it->object_path_name; } return stream_name; @@ -401,7 +407,17 @@ bool ISerialization::hasSubcolumnForPath(const SubstreamPath & path, size_t pref || path[last_elem].type == Substream::TupleElement || path[last_elem].type == Substream::ArraySizes || path[last_elem].type == Substream::VariantElement - || path[last_elem].type == Substream::VariantElementNullMap; + || path[last_elem].type == Substream::VariantElementNullMap + || path[last_elem].type == Substream::ObjectTypedPath; +} + +bool ISerialization::isEphemeralSubcolumn(const DB::ISerialization::SubstreamPath & path, size_t prefix_len) +{ + if (prefix_len == 0 || prefix_len > path.size()) + return false; + + size_t last_elem = prefix_len - 1; + return path[last_elem].type == Substream::VariantElementNullMap; } ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len) diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 5d0bf60c59f..480d5a4f7c4 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -176,8 +176,8 @@ public: SparseElements, SparseOffsets, - ObjectStructure, - ObjectData, + DeprecatedObjectStructure, + DeprecatedObjectData, VariantDiscriminators, NamedVariantDiscriminators, @@ -189,6 +189,12 @@ public: DynamicData, DynamicStructure, + ObjectData, + ObjectTypedPath, + ObjectDynamicPath, + ObjectSharedData, + ObjectStructure, + Regular, }; @@ -203,6 +209,9 @@ public: /// Name of substream for type from 'named_types'. String name_of_substream; + /// Path name for Object type elements. 
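+        /// For ObjectTypedPath and ObjectDynamicPath substreams it holds the JSON path itself,
+        /// so, for example, a path "a.b" contributes a stream name with the suffix ".a.b"
+        /// (see getNameForSubstreamPath in ISerialization.cpp).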
+ String object_path_name; + /// Data for current substream. SubstreamData data; @@ -263,13 +272,13 @@ public: bool use_compact_variant_discriminators_serialization = false; - enum class DynamicStatisticsMode + enum class ObjectAndDynamicStatisticsMode { NONE, /// Don't write statistics. PREFIX, /// Write statistics in prefix. SUFFIX, /// Write statistics in suffix. }; - DynamicStatisticsMode dynamic_write_statistics = DynamicStatisticsMode::NONE; + ObjectAndDynamicStatisticsMode object_and_dynamic_write_statistics = ObjectAndDynamicStatisticsMode::NONE; }; struct DeserializeBinaryBulkSettings @@ -290,7 +299,7 @@ public: /// If not zero, may be used to avoid reallocations while reading column of String type. double avg_value_size_hint = 0; - bool dynamic_read_statistics = false; + bool object_and_dynamic_read_statistics = false; }; /// Call before serializeBinaryBulkWithMultipleStreams chain to write something before first mark. @@ -440,6 +449,10 @@ public: static bool hasSubcolumnForPath(const SubstreamPath & path, size_t prefix_len); static SubstreamData createFromPath(const SubstreamPath & path, size_t prefix_len); + /// Returns true if subcolumn doesn't actually stores any data in column and doesn't require a separate stream + /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);. + static bool isEphemeralSubcolumn(const SubstreamPath & path, size_t prefix_len); + protected: template State * checkAndGetState(const StatePtr & state) const; diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 9cd0adcc2ed..ab24779ced2 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -115,7 +115,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types /// that is specified in the Dynamic type (we could decrease it before merge). - writeBinaryLittleEndian(dynamic_state->max_dynamic_types, *stream); + writeVarUInt(dynamic_state->max_dynamic_types, *stream); dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; @@ -123,7 +123,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( /// Write information about variants. size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. - writeBinaryLittleEndian(num_variants, *stream); + writeVarUInt(num_variants, *stream); if (settings.data_types_binary_encoding) { const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); @@ -143,7 +143,7 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( } /// Write statistics in prefix if needed. - if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX) + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX) { const auto & statistics = column_dynamic.getStatistics(); /// First, write statistics for usual variants. 
@@ -225,8 +225,8 @@ void SerializationDynamic::deserializeBinaryBulkStatePrefix( return; auto dynamic_state = std::make_shared(); - dynamic_state->structure_state = structure_state; - dynamic_state->variant_serialization = checkAndGetState(structure_state)->variant_type->getDefaultSerialization(); + dynamic_state->structure_state = std::move(structure_state); + dynamic_state->variant_serialization = checkAndGetState(dynamic_state->structure_state)->variant_type->getDefaultSerialization(); settings.path.push_back(Substream::DynamicData); dynamic_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_state->variant_state, cache); @@ -243,7 +243,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD DeserializeBinaryBulkStatePtr state = nullptr; if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) { - state = cached_state; + state = std::move(cached_state); } else if (auto * structure_stream = settings.getter(settings.path)) { @@ -252,11 +252,11 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); /// Read max_dynamic_types parameter. - readBinaryLittleEndian(structure_state->max_dynamic_types, *structure_stream); + readVarUInt(structure_state->max_dynamic_types, *structure_stream); /// Read information about variants. DataTypes variants; size_t num_variants; - readBinaryLittleEndian(num_variants, *structure_stream); + readVarUInt(num_variants, *structure_stream); variants.reserve(num_variants + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { @@ -277,16 +277,12 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD auto variant_type = std::make_shared(variants); /// Read statistics. - if (settings.dynamic_read_statistics) + if (settings.object_and_dynamic_read_statistics) { ColumnDynamic::Statistics statistics(ColumnDynamic::Statistics::Source::READ); /// First, read statistics for usual variants. - size_t variant_size; for (const auto & variant : variant_type->getVariants()) - { - readVarUInt(variant_size, *structure_stream); - statistics.variants_statistics[variant->getName()] = variant_size; - } + readVarUInt(statistics.variants_statistics[variant->getName()], *structure_stream); /// Second, read statistics for shared variants. size_t statistics_size; @@ -295,8 +291,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD for (size_t i = 0; i != statistics_size; ++i) { readStringBinary(variant_name, *structure_stream); - readVarUInt(variant_size, *structure_stream); - statistics.shared_variants_statistics[variant_name] = variant_size; + readVarUInt(statistics.shared_variants_statistics[variant_name], *structure_stream); } structure_state->statistics = std::make_shared(std::move(statistics)); @@ -320,10 +315,10 @@ void SerializationDynamic::serializeBinaryBulkStateSuffix( settings.path.pop_back(); if (!stream) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state prefix"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state suffix"); /// Write statistics in suffix if needed. 
- if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX) + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX) { /// First, write statistics for usual variants. for (const auto & variant_name : dynamic_state->variant_names) @@ -348,6 +343,18 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const +{ + size_t tmp_size; + serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(column, offset, limit, settings, state, tmp_size); +} + +void SerializationDynamic::serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state, + size_t & total_size_of_variants) const { const auto & column_dynamic = assert_cast(column); auto * dynamic_state = checkAndGetState(state); @@ -361,10 +368,18 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); settings.path.push_back(Substream::DynamicData); + assert_cast(*dynamic_state->variant_serialization) + .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics( + *variant_column, + offset, + limit, + settings, + dynamic_state->variant_state, + dynamic_state->statistics.variants_statistics, + total_size_of_variants); + if (dynamic_state->recalculate_statistics) { - assert_cast(*dynamic_state->variant_serialization) - .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.variants_statistics); /// Calculate statistics for shared variants. 
const auto & shared_variant = column_dynamic.getSharedVariant(); if (!shared_variant.empty()) @@ -389,10 +404,6 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( } } } - else - { - assert_cast(*dynamic_state->variant_serialization).serializeBinaryBulkWithMultipleStreams(*variant_column, offset, limit, settings, dynamic_state->variant_state); - } settings.path.pop_back(); } @@ -753,6 +764,12 @@ void SerializationDynamic::serializeTextJSON(const IColumn & column, size_t row_ serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } +void SerializationDynamic::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & dynamic_column = assert_cast(column); + dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextJSONPretty(dynamic_column.getVariantColumn(), row_num, ostr, settings, indent); +} + void SerializationDynamic::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { auto read_field = [&settings](ReadBuffer & buf) diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index 3dbf311fb6c..f34b5d0e770 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace DB @@ -11,7 +12,7 @@ class SerializationDynamicElement; class SerializationDynamic : public ISerialization { public: - explicit SerializationDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_) + explicit SerializationDynamic(size_t max_dynamic_types_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) : max_dynamic_types(max_dynamic_types_) { } @@ -59,6 +60,14 @@ public: SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const override; + void serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state, + size_t & total_size_of_variants) const; + void deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, size_t limit, @@ -89,6 +98,7 @@ public: bool tryDeserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; bool tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.cpp b/src/DataTypes/Serializations/SerializationDynamicElement.cpp index cffca14bca5..a16186abf2e 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.cpp +++ b/src/DataTypes/Serializations/SerializationDynamicElement.cpp @@ -53,6 +53,7 @@ void SerializationDynamicElement::enumerateStreams( .withColumn(data.column) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state->variant_element_state); + settings.path.back().data = variant_data; 
deserialize_state->variant_serialization->enumerateStreams(settings, callback, variant_data); settings.path.pop_back(); } diff --git a/src/DataTypes/Serializations/SerializationJSON.cpp b/src/DataTypes/Serializations/SerializationJSON.cpp new file mode 100644 index 00000000000..092ccd1c5a5 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSON.cpp @@ -0,0 +1,405 @@ +#include +#include +#include + +#if USE_SIMDJSON +#include +#endif +#if USE_RAPIDJSON +#include +#endif +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int INCORRECT_DATA; +} + +template +SerializationJSON::SerializationJSON( + std::unordered_map typed_paths_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_regexps_to_skip_, + std::unique_ptr> json_extract_tree_) + : SerializationObject(std::move(typed_paths_serializations_), paths_to_skip_, path_regexps_to_skip_) + , json_extract_tree(std::move(json_extract_tree_)) +{ +} + +namespace +{ + +/// Struct that represents elements of the JSON path. +/// "a.b.c" -> ["a", "b", "c"] +struct PathElements +{ + explicit PathElements(const String & path) + { + const char * start = path.data(); + const char * end = start + path.size(); + const char * pos = start; + const char * last_dot_pos = pos - 1; + for (pos = start; pos != end; ++pos) + { + if (*pos == '.') + { + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + last_dot_pos = pos; + } + } + + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + } + + size_t size() const { return elements.size(); } + + std::vector elements; +}; + +/// Struct that represents a prefix of a JSON path. Used during output of the JSON object. +struct Prefix +{ + /// Shrink current prefix to the common prefix of current prefix and specified path. + /// For example, if current prefix is a.b.c.d and path is a.b.e, then shrink the prefix to a.b. + void shrinkToCommonPrefix(const PathElements & path_elements) + { + /// Don't include last element in path_elements in the prefix. + size_t i = 0; + while (i != elements.size() && i != (path_elements.elements.size() - 1) && elements[i].first == path_elements.elements[i]) + ++i; + elements.resize(i); + } + + /// Check is_first flag in current object. + bool isFirstInCurrentObject() const + { + if (elements.empty()) + return root_is_first_flag; + return elements.back().second; + } + + /// Set flag is_first = false in current object. + void setNotFirstInCurrentObject() + { + if (elements.empty()) + root_is_first_flag = false; + else + elements.back().second = false; + } + + size_t size() const { return elements.size(); } + + /// Elements of the prefix: (path element, is_first flag in this prefix). + /// is_first flag indicates if we already serialized some key in the object with such prefix. 
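+    /// For example, right after the value of path "a.b.c" has been written, the prefix is
+    /// [("a", false), ("b", false)]; a following path "a.d" shrinks it to [("a", false)],
+    /// so one object is closed and the key "d" is written with a preceding comma.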
+ std::vector> elements; + bool root_is_first_flag = true; +}; + +} + +template +void SerializationJSON::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, bool pretty, size_t indent) const +{ + const auto & column_object = assert_cast(column); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + size_t shared_data_offset = shared_data_offsets[static_cast(row_num) - 1]; + size_t shared_data_end = shared_data_offsets[static_cast(row_num)]; + + /// We need to convert the set of paths in this row to a JSON object. + /// To do it, we first collect all the paths from current row, then we sort them + /// and construct the resulting JSON object by iterating over sorted list of paths. + /// For example: + /// b.c, a.b, a.a, b.e, g, h.u.t -> a.a, a.b, b.c, b.e, g, h.u.t -> {"a" : {"a" : ..., "b" : ...}, "b" : {"c" : ..., "e" : ...}, "g" : ..., "h" : {"u" : {"t" : ...}}}. + std::vector sorted_paths; + sorted_paths.reserve(typed_paths.size() + dynamic_paths.size() + (shared_data_end - shared_data_offset)); + for (const auto & [path, _] : typed_paths) + sorted_paths.emplace_back(path); + for (const auto & [path, dynamic_column] : dynamic_paths) + { + /// We consider null value and absence of the path in a row as equivalent cases, because we cannot actually distinguish them. + /// So, we don't output null values at all. + if (!dynamic_column->isNullAt(row_num)) + sorted_paths.emplace_back(path); + } + for (size_t i = shared_data_offset; i != shared_data_end; ++i) + { + auto path = shared_data_paths->getDataAt(i).toString(); + sorted_paths.emplace_back(path); + } + + std::sort(sorted_paths.begin(), sorted_paths.end()); + + if (pretty) + writeCString("{\n", ostr); + else + writeChar('{', ostr); + size_t index_in_shared_data_values = shared_data_offset; + /// current_prefix represents the path of the object we are currently serializing keys in. + Prefix current_prefix; + for (const auto & path : sorted_paths) + { + PathElements path_elements(path); + /// Change prefix to common prefix between current prefix and current path. + /// If prefix changed (it can only decrease), close all finished objects. + /// For example: + /// Current prefix: a.b.c.d + /// Current path: a.b.e.f + /// It means now we have : {..., "a" : {"b" : {"c" : {"d" : ... + /// Common prefix will be a.b, so it means we should close objects a.b.c.d and a.b.c: {..., "a" : {"b" : {"c" : {"d" : ...}} + /// and continue serializing keys in object a.b + size_t prev_prefix_size = current_prefix.size(); + current_prefix.shrinkToCommonPrefix(path_elements); + size_t prefix_size = current_prefix.size(); + if (prefix_size != prev_prefix_size) + { + size_t objects_to_close = prev_prefix_size - prefix_size; + if (pretty) + { + writeChar('\n', ostr); + for (size_t i = 0; i != objects_to_close; ++i) + { + writeChar(' ', (indent + prefix_size + objects_to_close - i) * 4, ostr); + if (i != objects_to_close - 1) + writeCString("}\n", ostr); + else + writeChar('}', ostr); + } + } + else + { + for (size_t i = 0; i != objects_to_close; ++i) + writeChar('}', ostr); + } + } + + /// Now we are inside object that has common prefix with current path. + /// We should go inside all objects in current path. 
+ /// From the example above we should open object a.b.e: + /// {..., "a" : {"b" : {"c" : {"d" : ...}}, "e" : { + if (prefix_size + 1 < path_elements.size()) + { + for (size_t i = prefix_size; i != path_elements.size() - 1; ++i) + { + /// Write comma before the key if it's not the first key in this prefix. + if (!current_prefix.isFirstInCurrentObject()) + { + if (pretty) + writeCString(",\n", ostr); + else + writeChar(',', ostr); + } + else + { + current_prefix.setNotFirstInCurrentObject(); + } + + if (pretty) + { + writeChar(' ', (indent + i + 1) * 4, ostr); + writeJSONString(path_elements.elements[i], ostr, settings); + writeCString(" : {\n", ostr); + } + else + { + writeJSONString(path_elements.elements[i], ostr, settings); + writeCString(":{", ostr); + } + + /// Update current prefix. + current_prefix.elements.emplace_back(path_elements.elements[i], true); + } + } + + /// Write comma before the key if it's not the first key in this prefix. + if (!current_prefix.isFirstInCurrentObject()) + { + if (pretty) + writeCString(",\n", ostr); + else + writeChar(',', ostr); + } + else + { + current_prefix.setNotFirstInCurrentObject(); + } + + if (pretty) + { + writeChar(' ', (indent + current_prefix.size() + 1) * 4, ostr); + writeJSONString(path_elements.elements.back(), ostr, settings); + writeCString(" : ", ostr); + } + else + { + writeJSONString(path_elements.elements.back(), ostr, settings); + writeCString(":", ostr); + } + + /// Serialize value of current path. + if (auto typed_it = typed_paths.find(path); typed_it != typed_paths.end()) + { + if (pretty) + typed_path_serializations.at(path)->serializeTextJSONPretty(*typed_it->second, row_num, ostr, settings, indent + current_prefix.size() + 1); + else + typed_path_serializations.at(path)->serializeTextJSON(*typed_it->second, row_num, ostr, settings); + } + else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + if (pretty) + dynamic_serialization->serializeTextJSONPretty(*dynamic_it->second, row_num, ostr, settings, indent + current_prefix.size() + 1); + else + dynamic_serialization->serializeTextJSON(*dynamic_it->second, row_num, ostr, settings); + } + else + { + /// To serialize value stored in shared data we should first deserialize it from binary format. + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + column_object.deserializeValueFromSharedData(shared_data_values, index_in_shared_data_values++, *tmp_dynamic_column); + + if (pretty) + dynamic_serialization->serializeTextJSONPretty(*tmp_dynamic_column, 0, ostr, settings, indent + current_prefix.size() + 1); + else + dynamic_serialization->serializeTextJSON(*tmp_dynamic_column, 0, ostr, settings); + } + } + + /// Close all remaining open objects. 
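+    /// For example, if the last serialized path was "a.b.c", the objects "a" and "b" are still
+    /// open here, so two closing braces are written for them plus the final one for the whole row.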
+ if (pretty) + { + writeChar('\n', ostr); + for (size_t i = 0; i != current_prefix.elements.size(); ++i) + { + writeChar(' ', (indent + current_prefix.size() - i) * 4, ostr); + writeCString("}\n", ostr); + } + writeChar(' ', indent * 4, ostr); + writeChar('}', ostr); + } + else + { + for (size_t i = 0; i != current_prefix.elements.size(); ++i) + writeChar('}', ostr); + writeChar('}', ostr); + } +} + +template +void SerializationJSON::deserializeTextImpl(IColumn & column, std::string_view object, const FormatSettings & settings) const +{ + typename Parser::Element document; + auto parser = parsers_pool.get([] { return new Parser; }); + if (!parser->parse(object, document)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse JSON object here: {}", object); + + String error; + if (!json_extract_tree->insertResultToColumn(column, document, insert_settings, settings, error)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot insert data into JSON column: {}", error); +} + +template +void SerializationJSON::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationJSON::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readStringUntilEOF(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeEscapedString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readEscapedString(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeQuotedString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readQuotedString(object, istr); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeCSVString(buf.str(), ostr); +} + +template +void SerializationJSON::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object; + readCSVString(object, istr, settings.csv); + deserializeTextImpl(column, object, settings); +} + +template +void SerializationJSON::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString buf; + serializeTextImpl(column, row_num, buf, settings); + writeXMLStringForTextElement(buf.str(), ostr); +} + +template +void SerializationJSON::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void 
SerializationJSON::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + serializeTextImpl(column, row_num, ostr, settings, true, indent); +} + +template +void SerializationJSON::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + String object_buffer; + auto object_view = readJSONObjectAsViewPossiblyInvalid(istr, object_buffer); + deserializeTextImpl(column, object_view, settings); +} + +#if USE_SIMDJSON +template class SerializationJSON; +#endif +#if USE_RAPIDJSON +template class SerializationJSON; +#else +template class SerializationJSON; +#endif + +} diff --git a/src/DataTypes/Serializations/SerializationJSON.h b/src/DataTypes/Serializations/SerializationJSON.h new file mode 100644 index 00000000000..934c94527f3 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationJSON.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/// Class for text serialization/deserialization of the JSON data type. +template +class SerializationJSON : public SerializationObject +{ +public: + SerializationJSON( + std::unordered_map typed_paths_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_regexps_to_skip_, + std::unique_ptr> json_extract_tree_); + + void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; + void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + + void serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + +private: + void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, bool pretty = false, size_t indent = 0) const; + void deserializeTextImpl(IColumn & column, std::string_view object, const FormatSettings & settings) const; + + std::unique_ptr> json_extract_tree; + JSONExtractInsertSettings insert_settings; + /// Pool of parser objects to make SerializationJSON thread safe. 
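+    /// deserializeTextImpl takes a parser from this pool on every call, so concurrent
+    /// deserializations never share a parser instance.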
+ mutable SimpleObjectPool parsers_pool; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index 40071c4607a..3195a04d348 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -268,9 +268,16 @@ void SerializationLowCardinality::serializeBinaryBulkStateSuffix( void SerializationLowCardinality::deserializeBinaryBulkStatePrefix( DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, - SubstreamsDeserializeStatesCache * /*cache*/) const + SubstreamsDeserializeStatesCache * cache) const { settings.path.push_back(Substream::DictionaryKeys); + + if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) + { + state = std::move(cached_state); + return; + } + auto * stream = settings.getter(settings.path); settings.path.pop_back(); diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index c6c87b5aa7b..0042aa6d89d 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -1,586 +1,793 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include namespace DB { namespace ErrorCodes { - extern const int NOT_IMPLEMENTED; extern const int INCORRECT_DATA; - extern const int CANNOT_READ_ALL_DATA; - extern const int ARGUMENT_OUT_OF_BOUND; - extern const int CANNOT_PARSE_TEXT; - extern const int EXPERIMENTAL_FEATURE_ERROR; + extern const int LOGICAL_ERROR; } -template -template -void SerializationObject::deserializeTextImpl(IColumn & column, Reader && reader) const +SerializationObject::SerializationObject( + std::unordered_map typed_path_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_regexps_to_skip_) + : typed_path_serializations(std::move(typed_path_serializations_)) + , paths_to_skip(paths_to_skip_) + , dynamic_serialization(std::make_shared()) + , shared_data_serialization(getTypeOfSharedData()->getDefaultSerialization()) { - auto & column_object = assert_cast(column); + /// We will need sorted order of typed paths to serialize them in order for consistency. + sorted_typed_paths.reserve(typed_path_serializations.size()); + for (const auto & [path, _] : typed_path_serializations) + sorted_typed_paths.emplace_back(path); + std::sort(sorted_typed_paths.begin(), sorted_typed_paths.end()); + sorted_paths_to_skip.assign(paths_to_skip.begin(), paths_to_skip.end()); + std::sort(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end()); + for (const auto & regexp_str : path_regexps_to_skip_) + path_regexps_to_skip.emplace_back(regexp_str); +} - String buf; - reader(buf); - std::optional result; +const DataTypePtr & SerializationObject::getTypeOfSharedData() +{ + /// Array(Tuple(String, String)) + static const DataTypePtr type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); + return type; +} - /// Treat empty string as an empty object - /// for better CAST from String to Object. 
- if (!buf.empty()) +bool SerializationObject::shouldSkipPath(const String & path) const +{ + if (paths_to_skip.contains(path)) + return true; + + auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); + if (it != sorted_paths_to_skip.end() && it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) + return true; + + for (const auto & regexp : path_regexps_to_skip) { - auto parser = parsers_pool.get([] { return new Parser; }); - result = parser->parse(buf.data(), buf.size()); - } - else - { - result = ParseResult{}; + if (re2::RE2::FullMatch(path, regexp)) + return true; } - if (!result) - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse object"); + return false; +} - auto & [paths, values] = *result; - assert(paths.size() == values.size()); +SerializationObject::ObjectSerializationVersion::ObjectSerializationVersion(UInt64 version) : value(static_cast(version)) +{ + checkVersion(version); +} - size_t old_column_size = column_object.size(); - for (size_t i = 0; i < paths.size(); ++i) +void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 version) +{ + if (version != BASIC) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Object structure serialization."); +} + +struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState +{ + SerializationObject::ObjectSerializationVersion serialization_version; + size_t max_dynamic_paths; + std::vector sorted_dynamic_paths; + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + ISerialization::SerializeBinaryBulkStatePtr shared_data_state; + /// Paths statistics. + ColumnObject::Statistics statistics; + /// If true, statistics will be recalculated during serialization. + bool recalculate_statistics = false; + + explicit SerializeBinaryBulkStateObject(UInt64 serialization_version_) + : serialization_version(serialization_version_), statistics(ColumnObject::Statistics::Source::READ) { - auto field_info = getFieldInfo(values[i]); - if (field_info.need_fold_dimension) - values[i] = applyVisitor(FieldVisitorFoldDimension(field_info.num_dimensions), std::move(values[i])); - if (isNothing(field_info.scalar_type)) - continue; + } +}; - if (!column_object.hasSubcolumn(paths[i])) +struct DeserializeBinaryBulkStateObject : public ISerialization::DeserializeBinaryBulkState +{ + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + ISerialization::DeserializeBinaryBulkStatePtr shared_data_state; + ISerialization::DeserializeBinaryBulkStatePtr structure_state; +}; + +void SerializationObject::enumerateStreams(EnumerateStreamsSettings & settings, const StreamCallback & callback, const SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * column_object = data.column ? &assert_cast(*data.column) : nullptr; + const auto * type_object = data.type ? &assert_cast(*data.type) : nullptr; + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + const auto * structure_state = deserialize_state ? checkAndGetState(deserialize_state->structure_state) : nullptr; + settings.path.push_back(Substream::ObjectData); + + /// First, iterate over typed paths in sorted order, we will always serialize them. 
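+    /// Overall, an Object column enumerates the following substreams: ObjectStructure, then inside
+    /// ObjectData one ObjectTypedPath per typed path, one ObjectDynamicPath per dynamic path
+    /// and a single ObjectSharedData substream.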
+ for (const auto & path : sorted_typed_paths) + { + settings.path.back().creator = std::make_shared(path); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + const auto & serialization = typed_path_serializations.at(path); + auto path_data = SubstreamData(serialization) + .withType(type_object ? type_object->getTypedPaths().at(path) : nullptr) + .withColumn(column_object ? column_object->getTypedPaths().at(path) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->typed_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + settings.path.back().creator.reset(); + } + + /// If column or deserialization state was provided, iterate over dynamic paths, + if (column_object || structure_state) + { + /// Enumerate dynamic paths in sorted order for consistency. + const auto * dynamic_paths = column_object ? &column_object->getDynamicPaths() : nullptr; + std::vector sorted_dynamic_paths; + /// If we have deserialize_state we can take sorted dynamic paths list from it. + if (structure_state) { - if (paths[i].hasNested()) - column_object.addNestedSubcolumn(paths[i], field_info, old_column_size); - else - column_object.addSubcolumn(paths[i], old_column_size); + sorted_dynamic_paths = structure_state->sorted_dynamic_paths; + } + else + { + sorted_dynamic_paths.reserve(dynamic_paths->size()); + for (const auto & [path, _] : *dynamic_paths) + sorted_dynamic_paths.push_back(path); + std::sort(sorted_dynamic_paths.begin(), sorted_dynamic_paths.end()); } - auto & subcolumn = column_object.getSubcolumn(paths[i]); - assert(subcolumn.size() == old_column_size); - - subcolumn.insert(std::move(values[i]), std::move(field_info)); - } - - /// Insert default values to missed subcolumns. - const auto & subcolumns = column_object.getSubcolumns(); - for (const auto & entry : subcolumns) - { - if (entry->data.size() == old_column_size) + DataTypePtr dynamic_type = std::make_shared(); + for (const auto & path : sorted_dynamic_paths) { - bool inserted = column_object.tryInsertDefaultFromNested(entry); - if (!inserted) - entry->data.insertDefault(); + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(dynamic_serialization) + .withType(dynamic_type) + .withColumn(dynamic_paths ? dynamic_paths->at(path) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->dynamic_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + dynamic_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); } } - column_object.incrementNumRows(); + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(getTypeOfSharedData()) + .withColumn(column_object ? column_object->getSharedDataPtr() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? 
deserialize_state->shared_data_state : nullptr); + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + settings.path.pop_back(); } -template -void SerializationObject::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { readStringInto(s, istr); }); -} - -template -void SerializationObject::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const -{ - deserializeTextImpl(column, [&](String & s) { settings.tsv.crlf_end_of_line_input ? readEscapedStringCRLF(s, istr) : readEscapedString(s, istr); }); -} - -template -void SerializationObject::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { readQuotedStringInto(s, istr); }); -} - -template -void SerializationObject::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const -{ - deserializeTextImpl(column, [&](String & s) { Parser::readJSON(s, istr); }); -} - -template -void SerializationObject::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const -{ - deserializeTextImpl(column, [&](String & s) { readCSVStringInto(s, istr, settings.csv); }); -} - -template -template -void SerializationObject::checkSerializationIsSupported(const TSettings & settings) const -{ - if (settings.position_independent_encoding) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with position independent encoding"); -} - -template -struct SerializationObject::SerializeStateObject : public ISerialization::SerializeBinaryBulkState -{ - DataTypePtr nested_type; - SerializationPtr nested_serialization; - SerializeBinaryBulkStatePtr nested_state; -}; - -template -struct SerializationObject::DeserializeStateObject : public ISerialization::DeserializeBinaryBulkState -{ - BinarySerializationKind kind; - DataTypePtr nested_type; - SerializationPtr nested_serialization; - DeserializeBinaryBulkStatePtr nested_state; -}; - -template -void SerializationObject::serializeBinaryBulkStatePrefix( +void SerializationObject::serializeBinaryBulkStatePrefix( const IColumn & column, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings); - if (state) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with non-trivial state"); - const auto & column_object = assert_cast(column); - if (!column_object.isFinalized()) - { - auto finalized = column_object.cloneFinalized(); - serializeBinaryBulkStatePrefix(*finalized, settings, state); - return; - } + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data = column_object.getSharedDataPtr(); settings.path.push_back(Substream::ObjectStructure); auto * stream = settings.getter(settings.path); + settings.path.pop_back(); if (!stream) - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Missing stream for kind of binary serialization"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state prefix"); - auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + /// Write serialization version. 
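+    /// The structure prefix consists of: the serialization version, the max_dynamic_paths
+    /// parameter, the number of dynamic paths followed by their sorted names, and optionally
+    /// path statistics when they are written in the prefix.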
+ UInt64 serialization_version = ObjectSerializationVersion::Value::BASIC; + writeBinaryLittleEndian(serialization_version, *stream); - writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); - writeStringBinary(tuple_type->getName(), *stream); + auto object_state = std::make_shared(serialization_version); + object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); + /// Write max_dynamic_paths parameter. + writeVarUInt(object_state->max_dynamic_paths, *stream); + /// Write all dynamic paths in sorted order. + object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); + for (const auto & [path, _] : dynamic_paths) + object_state->sorted_dynamic_paths.push_back(path); + std::sort(object_state->sorted_dynamic_paths.begin(), object_state->sorted_dynamic_paths.end()); + writeVarUInt(object_state->sorted_dynamic_paths.size(), *stream); + for (const auto & path : object_state->sorted_dynamic_paths) + writeStringBinary(path, *stream); - auto state_object = std::make_shared(); - state_object->nested_type = tuple_type; - state_object->nested_serialization = tuple_type->getDefaultSerialization(); + /// Write statistics in prefix if needed. + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX) + { + const auto & statistics = column_object.getStatistics(); + /// First, write statistics for dynamic paths. + for (const auto & path : object_state->sorted_dynamic_paths) + { + size_t number_of_non_null_values = 0; + /// Check if we can use statistics stored in the column. There are 2 possible sources + /// of this statistics: + /// - statistics calculated during merge of some data parts (Statistics::Source::MERGE) + /// - statistics read from the data part during deserialization of Object column (Statistics::Source::READ). + /// We can rely only on statistics calculated during the merge, because column with statistics that was read + /// during deserialization from some data part could be filtered/limited/transformed/etc and so the statistics can be outdated. + if (statistics && statistics->source == ColumnObject::Statistics::Source::MERGE) + number_of_non_null_values = statistics->dynamic_paths_statistics.at(path); + /// Otherwise we can use only path column from current object column. + else + number_of_non_null_values = (dynamic_paths.at(path)->size() - dynamic_paths.at(path)->getNumberOfDefaultRows()); + writeVarUInt(number_of_non_null_values, *stream); + } - settings.path.back() = Substream::ObjectData; - state_object->nested_serialization->serializeBinaryBulkStatePrefix(*tuple_column, settings, state_object->nested_state); + /// Second, write statistics for paths in shared data. + /// Check if we have statistics calculated during merge of some data parts (Statistics::Source::MERGE). + if (statistics && statistics->source == ColumnObject::Statistics::Source::MERGE) + { + writeVarUInt(statistics->shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : statistics->shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } + } + /// If we don't have statistics for shared data from merge, calculate it from the column. 
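+            /// The result maps each path stored in shared data to the number of rows containing it
+            /// (e.g. shared data rows [("b", ...)], [("b", ...), ("c", ...)] give {"b" : 2, "c" : 1}),
+            /// keeping at most MAX_SHARED_DATA_STATISTICS_SIZE distinct paths.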
+ else + { + std::unordered_map shared_data_paths_statistics; + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_paths->size(); ++i) + { + auto path = shared_data_paths->getDataAt(i).toView(); + if (auto it = shared_data_paths_statistics.find(path); it != shared_data_paths_statistics.end()) + ++it->second; + else if (shared_data_paths_statistics.size() < ColumnObject::Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + shared_data_paths_statistics.emplace(path, 1); + } - state = std::move(state_object); - settings.path.pop_back(); -} - -template -void SerializationObject::serializeBinaryBulkStateSuffix( - SerializeBinaryBulkSettings & settings, - SerializeBinaryBulkStatePtr & state) const -{ - checkSerializationIsSupported(settings); - auto * state_object = checkAndGetState(state); + writeVarUInt(shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } + } + } + /// Otherwise statistics will be written in the suffix, in this case we will recalculate + /// statistics during serialization to make it more precise. + else + { + object_state->recalculate_statistics = true; + } settings.path.push_back(Substream::ObjectData); - state_object->nested_serialization->serializeBinaryBulkStateSuffix(settings, state_object->nested_state); + + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkStatePrefix(*typed_paths.at(path), settings, object_state->typed_path_states[path]); + settings.path.pop_back(); + } + + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->serializeBinaryBulkStatePrefix(*dynamic_paths.at(path), settings, object_state->dynamic_path_states[path]); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkStatePrefix(*shared_data, settings, object_state->shared_data_state); settings.path.pop_back(); + settings.path.pop_back(); + + state = std::move(object_state); } -template -void SerializationObject::deserializeBinaryBulkStatePrefix( +void SerializationObject::deserializeBinaryBulkStatePrefix( DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const { - checkSerializationIsSupported(settings); - if (state) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject doesn't support serialization with non-trivial state"); + auto structure_state = deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; - settings.path.push_back(Substream::ObjectStructure); - auto * stream = settings.getter(settings.path); - settings.path.pop_back(); - - if (!stream) - throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, - "Cannot read kind of binary serialization of DataTypeObject, because its stream is missing"); - - UInt8 kind_raw; - readIntBinary(kind_raw, *stream); - auto kind = magic_enum::enum_cast(kind_raw); - if (!kind) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); - - auto state_object = std::make_shared(); - state_object->kind = *kind; - - if 
(state_object->kind == BinarySerializationKind::TUPLE) - { - String data_type_name; - readStringBinary(data_type_name, *stream); - state_object->nested_type = DataTypeFactory::instance().get(data_type_name); - state_object->nested_serialization = state_object->nested_type->getDefaultSerialization(); - - if (!isTuple(state_object->nested_type)) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Data of type Object should be written as Tuple, got: {}", data_type_name); - } - else if (state_object->kind == BinarySerializationKind::STRING) - { - state_object->nested_type = std::make_shared(); - state_object->nested_serialization = std::make_shared(); - } - else - { - throw Exception(ErrorCodes::INCORRECT_DATA, - "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); - } + auto object_state = std::make_shared(); + object_state->structure_state = std::move(structure_state); settings.path.push_back(Substream::ObjectData); - state_object->nested_serialization->deserializeBinaryBulkStatePrefix(settings, state_object->nested_state, cache); + + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->deserializeBinaryBulkStatePrefix(settings, object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + const auto & sorted_dynamic_paths = checkAndGetState(object_state->structure_state)->sorted_dynamic_paths; + for (const auto & path : sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkStatePrefix(settings, object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, object_state->shared_data_state, cache); + settings.path.pop_back(); settings.path.pop_back(); - state = std::move(state_object); + state = std::move(object_state); } -template -void SerializationObject::serializeBinaryBulkWithMultipleStreams( +ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeObjectStructureStatePrefix( + DeserializeBinaryBulkSettings & settings, SubstreamsDeserializeStatesCache * cache) +{ + settings.path.push_back(Substream::ObjectStructure); + + DeserializeBinaryBulkStatePtr state = nullptr; + /// Check if we already deserialized this state. It can happen when we read both object column and its subcolumns. + if (auto cached_state = getFromSubstreamsDeserializeStatesCache(cache, settings.path)) + { + state = cached_state; + } + else if (auto * structure_stream = settings.getter(settings.path)) + { + /// Read structure serialization version. + UInt64 serialization_version; + readBinaryLittleEndian(serialization_version, *structure_stream); + auto structure_state = std::make_shared(serialization_version); + /// Read max_dynamic_paths parameter. + readVarUInt(structure_state->max_dynamic_paths, *structure_stream); + /// Read the sorted list of dynamic paths. 
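+            /// The paths were written in sorted order by serializeBinaryBulkStatePrefix,
+            /// so the list can be used as sorted_dynamic_paths directly.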
+ size_t dynamic_paths_size; + readVarUInt(dynamic_paths_size, *structure_stream); + structure_state->sorted_dynamic_paths.reserve(dynamic_paths_size); + structure_state->dynamic_paths.reserve(dynamic_paths_size); + for (size_t i = 0; i != dynamic_paths_size; ++i) + { + structure_state->sorted_dynamic_paths.emplace_back(); + readStringBinary(structure_state->sorted_dynamic_paths.back(), *structure_stream); + structure_state->dynamic_paths.insert(structure_state->sorted_dynamic_paths.back()); + } + + /// Read statistics if needed. + if (settings.object_and_dynamic_read_statistics) + { + ColumnObject::Statistics statistics(ColumnObject::Statistics::Source::READ); + statistics.dynamic_paths_statistics.reserve(structure_state->sorted_dynamic_paths.size()); + /// First, read dynamic paths statistics. + for (const auto & path : structure_state->sorted_dynamic_paths) + readVarUInt(statistics.dynamic_paths_statistics[path], *structure_stream); + + /// Second, read shared data paths statistics. + size_t size; + readVarUInt(size, *structure_stream); + statistics.shared_data_paths_statistics.reserve(size); + String path; + for (size_t i = 0; i != size; ++i) + { + readStringBinary(path, *structure_stream); + readVarUInt(statistics.shared_data_paths_statistics[path], *structure_stream); + } + + structure_state->statistics = std::make_shared(std::move(statistics)); + } + + state = std::move(structure_state); + addToSubstreamsDeserializeStatesCache(cache, settings.path, state); + } + + settings.path.pop_back(); + return state; +} + +void SerializationObject::serializeBinaryBulkWithMultipleStreams( const IColumn & column, size_t offset, size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const { - checkSerializationIsSupported(settings); const auto & column_object = assert_cast(column); - auto * state_object = checkAndGetState(state); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data = column_object.getSharedDataPtr(); + auto * object_state = checkAndGetState(state); - if (!column_object.isFinalized()) - { - auto finalized = column_object.cloneFinalized(); - serializeBinaryBulkWithMultipleStreams(*finalized, offset, limit, settings, state); - return; - } + if (column_object.getMaxDynamicPaths() != object_state->max_dynamic_paths) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_paths parameter of Object. Expected: {}, Got: {}", object_state->max_dynamic_paths, column_object.getMaxDynamicPaths()); - auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); - - if (!state_object->nested_type->equals(*tuple_type)) - { - throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, - "Types of internal column of Object mismatched. Expected: {}, Got: {}", - state_object->nested_type->getName(), tuple_type->getName()); - } + if (column_object.getDynamicPaths().size() != object_state->sorted_dynamic_paths.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of number of dynamic paths in Object. 
Expected: {}, Got: {}", object_state->sorted_dynamic_paths.size(), column_object.getDynamicPaths().size()); settings.path.push_back(Substream::ObjectData); - if (auto * stream = settings.getter(settings.path)) + + for (const auto & path : sorted_typed_paths) { - state_object->nested_serialization->serializeBinaryBulkWithMultipleStreams( - *tuple_column, offset, limit, settings, state_object->nested_state); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkWithMultipleStreams(*typed_paths.at(path), offset, limit, settings, object_state->typed_path_states[path]); + settings.path.pop_back(); } + const auto * dynamic_serialization_typed = assert_cast(dynamic_serialization.get()); + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto it = dynamic_paths.find(path); + if (it == dynamic_paths.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Dynamic structure mismatch for Object column: dynamic path '{}' is not found in the column", path); + if (object_state->recalculate_statistics) + { + size_t number_of_non_null_values = 0; + dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreamsAndCountTotalSizeOfVariants(*it->second, offset, limit, settings, object_state->dynamic_path_states[path], number_of_non_null_values); + object_state->statistics.dynamic_paths_statistics[path] += number_of_non_null_values; + } + else + { + dynamic_serialization_typed->serializeBinaryBulkWithMultipleStreams(*it->second, offset, limit, settings, object_state->dynamic_path_states[path]); + } + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkWithMultipleStreams(*shared_data, offset, limit, settings, object_state->shared_data_state); + if (object_state->recalculate_statistics) + { + /// Calculate statistics for paths in shared data. + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + size_t start = shared_data_offsets[offset - 1]; + size_t end = limit == 0 || offset + limit > shared_data_offsets.size() ? 
shared_data_paths->size() : shared_data_offsets[offset + limit - 1]; + for (size_t i = start; i != end; ++i) + { + auto path = shared_data_paths->getDataAt(i).toView(); + if (auto it = object_state->statistics.shared_data_paths_statistics.find(path); it != object_state->statistics.shared_data_paths_statistics.end()) + ++it->second; + else if (object_state->statistics.shared_data_paths_statistics.size() < ColumnObject::Statistics::MAX_SHARED_DATA_STATISTICS_SIZE) + object_state->statistics.shared_data_paths_statistics.emplace(path, 1); + } + } + settings.path.pop_back(); settings.path.pop_back(); } -template -void SerializationObject::deserializeBinaryBulkWithMultipleStreams( +void SerializationObject::serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state) const +{ + auto * object_state = checkAndGetState(state); + settings.path.push_back(Substream::ObjectStructure); + auto * stream = settings.getter(settings.path); + settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state suffix"); + + /// Write statistics in suffix if needed. + if (settings.object_and_dynamic_write_statistics == SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX) + { + /// First, write dynamic paths statistics. + for (const auto & path : object_state->sorted_dynamic_paths) + writeVarUInt(object_state->statistics.dynamic_paths_statistics[path], *stream); + + /// Second, write shared data paths statistics. + writeVarUInt(object_state->statistics.shared_data_paths_statistics.size(), *stream); + for (const auto & [path, size] : object_state->statistics.shared_data_paths_statistics) + { + writeStringBinary(path, *stream); + writeVarUInt(size, *stream); + } + } + + settings.path.push_back(Substream::ObjectData); + + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->serializeBinaryBulkStateSuffix(settings, object_state->typed_path_states[path]); + settings.path.pop_back(); + } + + for (const auto & path : object_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->serializeBinaryBulkStateSuffix(settings, object_state->dynamic_path_states[path]); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->serializeBinaryBulkStateSuffix(settings, object_state->shared_data_state); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObject::deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, size_t limit, DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsCache * cache) const { - checkSerializationIsSupported(settings); - if (!column->empty()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DataTypeObject cannot be deserialized to non-empty column"); + if (!state) + return; + auto * object_state = checkAndGetState(state); + auto * structure_state = checkAndGetState(object_state->structure_state); auto mutable_column = column->assumeMutable(); auto & column_object = assert_cast(*mutable_column); - auto * state_object = checkAndGetState(state); + /// If it's a new object column, set dynamic paths and statistics. 
+ if (column_object.empty()) + { + column_object.setMaxDynamicPaths(structure_state->max_dynamic_paths); + column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); + column_object.setStatistics(structure_state->statistics); + } + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + auto & shared_data = column_object.getSharedDataPtr(); settings.path.push_back(Substream::ObjectData); - if (state_object->kind == BinarySerializationKind::STRING) - deserializeBinaryBulkFromString(column_object, limit, settings, *state_object, cache); - else - deserializeBinaryBulkFromTuple(column_object, limit, settings, *state_object, cache); + for (const auto & path : sorted_typed_paths) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + typed_path_serializations.at(path)->deserializeBinaryBulkWithMultipleStreams(typed_paths[path], limit, settings, object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + for (const auto & path : structure_state->sorted_dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_paths[path], limit, settings, object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(shared_data, limit, settings, object_state->shared_data_state, cache); + settings.path.pop_back(); settings.path.pop_back(); - column_object.checkConsistency(); - column_object.finalize(); - column = std::move(mutable_column); } -template -void SerializationObject::deserializeBinaryBulkFromString( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const +void SerializationObject::serializeBinary(const Field & field, WriteBuffer & ostr, const DB::FormatSettings & settings) const { - ColumnPtr column_string = state.nested_type->createColumn(); - state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( - column_string, limit, settings, state.nested_state, cache); - - size_t input_rows_count = column_string->size(); - column_object.reserve(input_rows_count); - - FormatSettings format_settings; - for (size_t i = 0; i < input_rows_count; ++i) + const auto & object = field.safeGet(); + /// Serialize number of paths and then pairs (path, value). + writeVarUInt(object.size(), ostr); + for (const auto & [path, value] : object) { - const auto & val = column_string->getDataAt(i); - ReadBufferFromMemory read_buffer(val.data, val.size); - deserializeWholeText(column_object, read_buffer, format_settings); - - if (!read_buffer.eof()) - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, - "Cannot parse string to column Object. 
Expected eof"); - } -} - -template -void SerializationObject::deserializeBinaryBulkFromTuple( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const -{ - ColumnPtr column_tuple = state.nested_type->createColumn(); - state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( - column_tuple, limit, settings, state.nested_state, cache); - - auto [tuple_paths, tuple_types] = flattenTuple(state.nested_type); - auto flattened_tuple = flattenTuple(column_tuple); - const auto & tuple_columns = assert_cast(*flattened_tuple).getColumns(); - - assert(tuple_paths.size() == tuple_types.size()); - size_t num_subcolumns = tuple_paths.size(); - - if (tuple_columns.size() != num_subcolumns) - throw Exception(ErrorCodes::INCORRECT_DATA, - "Inconsistent type ({}) and column ({}) while reading column of type Object", - state.nested_type->getName(), column_tuple->getName()); - - for (size_t i = 0; i < num_subcolumns; ++i) - column_object.addSubcolumn(tuple_paths[i], tuple_columns[i]->assumeMutable()); -} - -template -void SerializationObject::serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -template -void SerializationObject::deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObject"); -} - -/// TODO: use format different of JSON in serializations. 
- -template -void SerializationObject::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - const auto & column_object = assert_cast(column); - const auto & subcolumns = column_object.getSubcolumns(); - - writeChar('{', ostr); - for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) - { - const auto & entry = *it; - if (it != subcolumns.begin()) - writeCString(",", ostr); - - writeDoubleQuoted(entry->path.getPath(), ostr); - writeChar(':', ostr); - serializeTextFromSubcolumn(entry->data, row_num, ostr, settings); - } - writeChar('}', ostr); -} - -template -template -void SerializationObject::serializeTextFromSubcolumn( - const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const -{ - const auto & least_common_type = subcolumn.getLeastCommonType(); - - if (subcolumn.isFinalized()) - { - const auto & finalized_column = subcolumn.getFinalizedColumn(); - auto info = least_common_type->getSerializationInfo(finalized_column); - auto serialization = least_common_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(finalized_column, row_num, ostr, settings, indent); + writeStringBinary(path, ostr); + if (auto it = typed_path_serializations.find(path); it != typed_path_serializations.end()) + it->second->serializeBinary(value, ostr, settings); else - serialization->serializeTextJSON(finalized_column, row_num, ostr, settings); - return; + dynamic_serialization->serializeBinary(value, ostr, settings); + } +} + +void SerializationObject::serializeBinary(const IColumn & col, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + const auto & column_object = assert_cast(col); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + size_t offset = shared_data_offsets[ssize_t(row_num) - 1]; + size_t end = shared_data_offsets[ssize_t(row_num)]; + + /// Serialize number of paths and then pairs (path, value). + writeVarUInt(typed_paths.size() + dynamic_paths.size() + (end - offset), ostr); + + for (const auto & [path, column] : typed_paths) + { + writeStringBinary(path, ostr); + typed_path_serializations.at(path)->serializeBinary(*column, row_num, ostr, settings); } - size_t ind = row_num; - if (ind < subcolumn.getNumberOfDefaultsInPrefix()) + for (const auto & [path, column] : dynamic_paths) { - /// Suboptimal, but it should happen rarely. 
- auto tmp_column = subcolumn.getLeastCommonType()->createColumn(); - tmp_column->insertDefault(); - - auto info = least_common_type->getSerializationInfo(*tmp_column); - auto serialization = least_common_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(*tmp_column, 0, ostr, settings, indent); - else - serialization->serializeTextJSON(*tmp_column, 0, ostr, settings); - return; + writeStringBinary(path, ostr); + dynamic_serialization->serializeBinary(*column, row_num, ostr, settings); } - ind -= subcolumn.getNumberOfDefaultsInPrefix(); - for (const auto & part : subcolumn.getData()) + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + for (size_t i = offset; i != end; ++i) { - if (ind < part->size()) + writeStringBinary(shared_data_paths->getDataAt(i), ostr); + auto value = shared_data_values->getDataAt(i); + ostr.write(value.data, value.size); + } +} + +void SerializationObject::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const +{ + Object object; + size_t number_of_paths; + readVarUInt(number_of_paths, istr); + /// Read pairs (path, value). + for (size_t i = 0; i != number_of_paths; ++i) + { + String path; + readStringBinary(path, istr); + if (!shouldSkipPath(path)) { - auto part_type = getDataTypeByColumn(*part); - auto info = part_type->getSerializationInfo(*part); - auto serialization = part_type->getSerialization(*info); - if constexpr (pretty_json) - serialization->serializeTextJSONPretty(*part, ind, ostr, settings, indent); + if (auto it = typed_path_serializations.find(path); it != typed_path_serializations.end()) + it->second->deserializeBinary(object[path], istr, settings); else - serialization->serializeTextJSON(*part, ind, ostr, settings); - return; + dynamic_serialization->deserializeBinary(object[path], istr, settings); + } + else + { + /// Skip value of this path. + Field tmp; + dynamic_serialization->deserializeBinary(tmp, istr, settings); + } + } + + field = std::move(object); +} + +/// Restore column object to the state with previous size. +/// We can use it in case of an exception during deserialization. 
+void SerializationObject::restoreColumnObject(ColumnObject & column_object, size_t prev_size) +{ + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + auto & shared_data_offsets = column_object.getSharedDataOffsets(); + + for (auto & [_, column] : typed_paths) + { + if (column->size() > prev_size) + column->popBack(column->size() - prev_size); + } + + for (auto & [_, column] : dynamic_paths) + { + if (column->size() > prev_size) + column->popBack(column->size() - prev_size); + } + + if (shared_data_offsets.size() > prev_size) + shared_data_offsets.resize(prev_size); + size_t prev_shared_data_offset = shared_data_offsets.back(); + if (shared_data_paths->size() > prev_shared_data_offset) + shared_data_paths->popBack(shared_data_paths->size() - prev_shared_data_offset); + if (shared_data_values->size() > prev_shared_data_offset) + shared_data_values->popBack(shared_data_values->size() - prev_shared_data_offset); +} + +void SerializationObject::deserializeBinary(IColumn & col, ReadBuffer & istr, const FormatSettings & settings) const +{ + auto & column_object = assert_cast(col); + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + auto & shared_data_offsets = column_object.getSharedDataOffsets(); + + size_t number_of_paths; + readVarUInt(number_of_paths, istr); + std::vector> paths_and_values_for_shared_data; + size_t prev_size = column_object.size(); + try + { + /// Read pairs (path, value). + for (size_t i = 0; i != number_of_paths; ++i) + { + String path; + readStringBinary(path, istr); + if (!shouldSkipPath(path)) + { + /// Check if we have this path in typed paths. + if (auto typed_it = typed_path_serializations.find(path); typed_it != typed_path_serializations.end()) + { + auto & typed_column = typed_paths[path]; + /// Check if we already had this path. + if (typed_column->size() > prev_size) + { + if (!settings.json.type_json_skip_duplicated_paths) + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); + } + else + { + typed_it->second->deserializeBinary(*typed_column, istr, settings); + } + } + /// Check if we have this path in dynamic paths. + else if (auto dynamic_it = dynamic_paths.find(path); dynamic_it != dynamic_paths.end()) + { + /// Check if we already had this path. + if (dynamic_it->second->size() > prev_size) + { + if (!settings.json.type_json_skip_duplicated_paths) + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); + } + + dynamic_serialization->deserializeBinary(*dynamic_it->second, istr, settings); + } + /// Try to add a new dynamic paths. + else if (auto * dynamic_column = column_object.tryToAddNewDynamicPath(path)) + { + dynamic_serialization->deserializeBinary(*dynamic_column, istr, settings); + } + /// Otherwise this path should go to shared data. 
+ else + { + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + String value; + readParsedValueIntoString(value, istr, [&](ReadBuffer & buf){ dynamic_serialization->deserializeBinary(*tmp_dynamic_column, buf, settings); }); + paths_and_values_for_shared_data.emplace_back(std::move(path), std::move(value)); + } + } + else + { + /// Skip value of this path. + Field tmp; + dynamic_serialization->deserializeBinary(tmp, istr, settings); + } } - ind -= part->size(); + std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + for (size_t i = 0; i != paths_and_values_for_shared_data.size(); ++i) + { + const auto & [path, value] = paths_and_values_for_shared_data[i]; + if (i != 0 && path == paths_and_values_for_shared_data[i - 1].first) + { + if (!settings.json.type_json_skip_duplicated_paths) + throw Exception(ErrorCodes::INCORRECT_DATA, "Found duplicated path during binary deserialization of JSON type: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); + } + else + { + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value.data(), value.size()); + } + } + shared_data_offsets.push_back(shared_data_paths->size()); } - - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for text serialization is out of range", row_num); -} - -template -void SerializationObject::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - serializeTextImpl(column, row_num, ostr, settings); -} - -template -void SerializationObject::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeEscapedString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeQuotedString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - serializeTextImpl(column, row_num, ostr, settings); -} - -template -void SerializationObject::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeCSVString(ostr_str.str(), ostr); -} - -template -void SerializationObject::serializeTextMarkdown( - const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - if (settings.markdown.escape_special_characters) + catch (...) { - WriteBufferFromOwnString ostr_str; - serializeTextImpl(column, row_num, ostr_str, settings); - writeMarkdownEscapedString(ostr_str.str(), ostr); + restoreColumnObject(column_object, prev_size); + throw; } - else + + /// Insert default to all remaining typed and dynamic paths. 
+ for (auto & [_, column] : typed_paths) { - serializeTextEscaped(column, row_num, ostr, settings); + if (column->size() == prev_size) + column->insertDefault(); + } + + for (auto & [_, column] : column_object.getDynamicPathsPtrs()) + { + if (column->size() == prev_size) + column->insertDefault(); } } -template -void SerializationObject::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +SerializationPtr SerializationObject::TypedPathSubcolumnCreator::create(const DB::SerializationPtr & prev) const { - const auto & column_object = assert_cast(column); - const auto & subcolumns = column_object.getSubcolumns(); - - writeCString("{\n", ostr); - for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) - { - const auto & entry = *it; - if (it != subcolumns.begin()) - writeCString(",\n", ostr); - - writeChar(' ', (indent + 1) * 4, ostr); - writeDoubleQuoted(entry->path.getPath(), ostr); - writeCString(": ", ostr); - serializeTextFromSubcolumn(entry->data, row_num, ostr, settings, indent + 1); - } - writeChar('\n', ostr); - writeChar(' ', indent * 4, ostr); - writeChar('}', ostr); -} - - -SerializationPtr getObjectSerialization(const String & schema_format) -{ - if (schema_format == "json") - { -#if USE_SIMDJSON - return std::make_shared>>(); -#elif USE_RAPIDJSON - return std::make_shared>>(); -#else - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "To use data type Object with JSON format ClickHouse should be built with Simdjson or Rapidjson"); -#endif - } - - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unknown schema format '{}'", schema_format); + return std::make_shared(prev, path); } } + diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 4cb7d0ab6a8..62ff9849f45 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -1,34 +1,43 @@ #pragma once #include -#include -#include +#include +#include namespace DB { -/** Serialization for data type Object. - * Supported only text serialization/deserialization. - * and binary bulk serialization/deserialization without position independent - * encoding, i.e. serialization/deserialization into Native format. - */ -template +class SerializationObjectDynamicPath; +class SerializationSubObject; + +/// Class for binary serialization/deserialization of an Object type (currently only JSON). class SerializationObject : public ISerialization { public: - /** In Native format ColumnObject can be serialized - * in two formats: as Tuple or as String. - * The format is the following: - * - * 1 byte -- 0 if Tuple, 1 if String. - * [type_name] -- Only for tuple serialization. - * ... data of internal column ... - * - * ClickHouse client serializazes objects as tuples. - * String serialization exists for clients, which cannot - * do parsing by themselves and they can send raw data as - * string. It will be parsed on the server side. - */ + /// Serialization can change in future. Let's introduce serialization version. 
+ struct ObjectSerializationVersion + { + enum Value + { + BASIC = 0, + }; + + Value value; + + static void checkVersion(UInt64 version); + + explicit ObjectSerializationVersion(UInt64 version); + }; + + SerializationObject( + std::unordered_map typed_path_serializations_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_regexps_to_skip_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; void serializeBinaryBulkStatePrefix( const IColumn & column, @@ -63,59 +72,55 @@ public: void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; - void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; - void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - void serializeTextMarkdown(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; - - void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; - void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + static void restoreColumnObject(ColumnObject & column_object, size_t prev_size); private: - enum class BinarySerializationKind : UInt8 + friend SerializationObjectDynamicPath; + friend SerializationSubObject; + + /// State of an Object structure. Can be also used during deserializing of Object subcolumns. + struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { - TUPLE = 0, - STRING = 1, + ObjectSerializationVersion structure_version; + size_t max_dynamic_paths; + std::vector sorted_dynamic_paths; + std::unordered_set dynamic_paths; + /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). 
+ ColumnObject::StatisticsPtr statistics; + + explicit DeserializeBinaryBulkStateObjectStructure(UInt64 structure_version_) : structure_version(structure_version_) {} }; - struct SerializeStateObject; - struct DeserializeStateObject; - - void deserializeBinaryBulkFromString( - ColumnObject & column_object, - size_t limit, + static DeserializeBinaryBulkStatePtr deserializeObjectStructureStatePrefix( DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const; + SubstreamsDeserializeStatesCache * cache); - void deserializeBinaryBulkFromTuple( - ColumnObject & column_object, - size_t limit, - DeserializeBinaryBulkSettings & settings, - DeserializeStateObject & state, - SubstreamsCache * cache) const; + /// Shared data has type Array(Tuple(String, String)). + static const DataTypePtr & getTypeOfSharedData(); - template - void checkSerializationIsSupported(const TSettings & settings) const; + struct TypedPathSubcolumnCreator : public ISubcolumnCreator + { + String path; - template - void deserializeTextImpl(IColumn & column, Reader && reader) const; + explicit TypedPathSubcolumnCreator(const String & path_) : path(path_) {} - void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; + DataTypePtr create(const DataTypePtr & prev) const override { return prev; } + ColumnPtr create(const ColumnPtr & prev) const override { return prev; } + SerializationPtr create(const SerializationPtr & prev) const override; + }; - template - void serializeTextFromSubcolumn(const ColumnObject::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent = 0) const; +protected: + bool shouldSkipPath(const String & path) const; - /// Pool of parser objects to make SerializationObject thread safe. - mutable SimpleObjectPool parsers_pool; + std::unordered_map typed_path_serializations; + std::unordered_set paths_to_skip; + std::vector sorted_paths_to_skip; + std::list path_regexps_to_skip; + SerializationPtr dynamic_serialization; + +private: + std::vector sorted_typed_paths; + SerializationPtr shared_data_serialization; }; -SerializationPtr getObjectSerialization(const String & schema_format); - } diff --git a/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp b/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp new file mode 100644 index 00000000000..4e9ebf6c03d --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDeprecated.cpp @@ -0,0 +1,586 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int INCORRECT_DATA; + extern const int CANNOT_READ_ALL_DATA; + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int CANNOT_PARSE_TEXT; + extern const int EXPERIMENTAL_FEATURE_ERROR; +} + +template +template +void SerializationObjectDeprecated::deserializeTextImpl(IColumn & column, Reader && reader) const +{ + auto & column_object = assert_cast(column); + + String buf; + reader(buf); + std::optional result; + + /// Treat empty string as an empty object + /// for better CAST from String to Object. 
+ if (!buf.empty()) + { + auto parser = parsers_pool.get([] { return new Parser; }); + result = parser->parse(buf.data(), buf.size()); + } + else + { + result = ParseResult{}; + } + + if (!result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot parse object"); + + auto & [paths, values] = *result; + assert(paths.size() == values.size()); + + size_t old_column_size = column_object.size(); + for (size_t i = 0; i < paths.size(); ++i) + { + auto field_info = getFieldInfo(values[i]); + if (field_info.need_fold_dimension) + values[i] = applyVisitor(FieldVisitorFoldDimension(field_info.num_dimensions), std::move(values[i])); + if (isNothing(field_info.scalar_type)) + continue; + + if (!column_object.hasSubcolumn(paths[i])) + { + if (paths[i].hasNested()) + column_object.addNestedSubcolumn(paths[i], field_info, old_column_size); + else + column_object.addSubcolumn(paths[i], old_column_size); + } + + auto & subcolumn = column_object.getSubcolumn(paths[i]); + assert(subcolumn.size() == old_column_size); + + subcolumn.insert(std::move(values[i]), std::move(field_info)); + } + + /// Insert default values to missed subcolumns. + const auto & subcolumns = column_object.getSubcolumns(); + for (const auto & entry : subcolumns) + { + if (entry->data.size() == old_column_size) + { + bool inserted = column_object.tryInsertDefaultFromNested(entry); + if (!inserted) + entry->data.insertDefault(); + } + } + + column_object.incrementNumRows(); +} + +template +void SerializationObjectDeprecated::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { readStringInto(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + deserializeTextImpl(column, [&](String & s) { settings.tsv.crlf_end_of_line_input ? 
readEscapedStringCRLF(s, istr) : readEscapedString(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { readQuotedStringInto(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings &) const +{ + deserializeTextImpl(column, [&](String & s) { Parser::readJSON(s, istr); }); +} + +template +void SerializationObjectDeprecated::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const +{ + deserializeTextImpl(column, [&](String & s) { readCSVStringInto(s, istr, settings.csv); }); +} + +template +template +void SerializationObjectDeprecated::checkSerializationIsSupported(const TSettings & settings) const +{ + if (settings.position_independent_encoding) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with position independent encoding"); +} + +template +struct SerializationObjectDeprecated::SerializeStateObject : public ISerialization::SerializeBinaryBulkState +{ + DataTypePtr nested_type; + SerializationPtr nested_serialization; + SerializeBinaryBulkStatePtr nested_state; +}; + +template +struct SerializationObjectDeprecated::DeserializeStateObject : public ISerialization::DeserializeBinaryBulkState +{ + BinarySerializationKind kind; + DataTypePtr nested_type; + SerializationPtr nested_serialization; + DeserializeBinaryBulkStatePtr nested_state; +}; + +template +void SerializationObjectDeprecated::serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + const auto & column_object = assert_cast(column); + if (!column_object.isFinalized()) + { + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkStatePrefix(*finalized, settings, state); + return; + } + + settings.path.push_back(Substream::DeprecatedObjectStructure); + auto * stream = settings.getter(settings.path); + + if (!stream) + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, "Missing stream for kind of binary serialization"); + + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + + writeIntBinary(static_cast(BinarySerializationKind::TUPLE), *stream); + writeStringBinary(tuple_type->getName(), *stream); + + auto state_object = std::make_shared(); + state_object->nested_type = tuple_type; + state_object->nested_serialization = tuple_type->getDefaultSerialization(); + + settings.path.back() = Substream::DeprecatedObjectData; + state_object->nested_serialization->serializeBinaryBulkStatePrefix(*tuple_column, settings, state_object->nested_state); + + state = std::move(state_object); + settings.path.pop_back(); +} + +template +void SerializationObjectDeprecated::serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + auto * state_object = checkAndGetState(state); + + settings.path.push_back(Substream::DeprecatedObjectData); + state_object->nested_serialization->serializeBinaryBulkStateSuffix(settings, state_object->nested_state); + settings.path.pop_back(); +} + +template +void 
SerializationObjectDeprecated::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const +{ + checkSerializationIsSupported(settings); + if (state) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject doesn't support serialization with non-trivial state"); + + settings.path.push_back(Substream::DeprecatedObjectStructure); + auto * stream = settings.getter(settings.path); + settings.path.pop_back(); + + if (!stream) + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, + "Cannot read kind of binary serialization of DataTypeObject, because its stream is missing"); + + UInt8 kind_raw; + readIntBinary(kind_raw, *stream); + auto kind = magic_enum::enum_cast(kind_raw); + if (!kind) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); + + auto state_object = std::make_shared(); + state_object->kind = *kind; + + if (state_object->kind == BinarySerializationKind::TUPLE) + { + String data_type_name; + readStringBinary(data_type_name, *stream); + state_object->nested_type = DataTypeFactory::instance().get(data_type_name); + state_object->nested_serialization = state_object->nested_type->getDefaultSerialization(); + + if (!isTuple(state_object->nested_type)) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Data of type Object should be written as Tuple, got: {}", data_type_name); + } + else if (state_object->kind == BinarySerializationKind::STRING) + { + state_object->nested_type = std::make_shared(); + state_object->nested_serialization = std::make_shared(); + } + else + { + throw Exception(ErrorCodes::INCORRECT_DATA, + "Unknown binary serialization kind of Object: {}", std::to_string(kind_raw)); + } + + settings.path.push_back(Substream::DeprecatedObjectData); + state_object->nested_serialization->deserializeBinaryBulkStatePrefix(settings, state_object->nested_state, cache); + settings.path.pop_back(); + + state = std::move(state_object); +} + +template +void SerializationObjectDeprecated::serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const +{ + checkSerializationIsSupported(settings); + const auto & column_object = assert_cast(column); + auto * state_object = checkAndGetState(state); + + if (!column_object.isFinalized()) + { + auto finalized = column_object.cloneFinalized(); + serializeBinaryBulkWithMultipleStreams(*finalized, offset, limit, settings, state); + return; + } + + auto [tuple_column, tuple_type] = unflattenObjectToTuple(column_object); + + if (!state_object->nested_type->equals(*tuple_type)) + { + throw Exception(ErrorCodes::EXPERIMENTAL_FEATURE_ERROR, + "Types of internal column of Object mismatched. 
Expected: {}, Got: {}", + state_object->nested_type->getName(), tuple_type->getName()); + } + + settings.path.push_back(Substream::DeprecatedObjectData); + if (auto * stream = settings.getter(settings.path)) + { + state_object->nested_serialization->serializeBinaryBulkWithMultipleStreams( + *tuple_column, offset, limit, settings, state_object->nested_state); + } + + settings.path.pop_back(); +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + checkSerializationIsSupported(settings); + if (!column->empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "DataTypeObject cannot be deserialized to non-empty column"); + + auto mutable_column = column->assumeMutable(); + auto & column_object = assert_cast(*mutable_column); + auto * state_object = checkAndGetState(state); + + settings.path.push_back(Substream::DeprecatedObjectData); + if (state_object->kind == BinarySerializationKind::STRING) + deserializeBinaryBulkFromString(column_object, limit, settings, *state_object, cache); + else + deserializeBinaryBulkFromTuple(column_object, limit, settings, *state_object, cache); + + settings.path.pop_back(); + column_object.checkConsistency(); + column_object.finalize(); + column = std::move(mutable_column); +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkFromString( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + ColumnPtr column_string = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_string, limit, settings, state.nested_state, cache); + + size_t input_rows_count = column_string->size(); + column_object.reserve(input_rows_count); + + FormatSettings format_settings; + for (size_t i = 0; i < input_rows_count; ++i) + { + const auto & val = column_string->getDataAt(i); + ReadBufferFromMemory read_buffer(val.data, val.size); + deserializeWholeText(column_object, read_buffer, format_settings); + + if (!read_buffer.eof()) + throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, + "Cannot parse string to column Object. 
Expected eof"); + } +} + +template +void SerializationObjectDeprecated::deserializeBinaryBulkFromTuple( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const +{ + ColumnPtr column_tuple = state.nested_type->createColumn(); + state.nested_serialization->deserializeBinaryBulkWithMultipleStreams( + column_tuple, limit, settings, state.nested_state, cache); + + auto [tuple_paths, tuple_types] = flattenTuple(state.nested_type); + auto flattened_tuple = flattenTuple(column_tuple); + const auto & tuple_columns = assert_cast(*flattened_tuple).getColumns(); + + assert(tuple_paths.size() == tuple_types.size()); + size_t num_subcolumns = tuple_paths.size(); + + if (tuple_columns.size() != num_subcolumns) + throw Exception(ErrorCodes::INCORRECT_DATA, + "Inconsistent type ({}) and column ({}) while reading column of type Object", + state.nested_type->getName(), column_tuple->getName()); + + for (size_t i = 0; i < num_subcolumns; ++i) + column_object.addSubcolumn(tuple_paths[i], tuple_columns[i]->assumeMutable()); +} + +template +void SerializationObjectDeprecated::serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +template +void SerializationObjectDeprecated::deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for SerializationObjectDeprecated"); +} + +/// TODO: use format different of JSON in serializations. 
+ +template +void SerializationObjectDeprecated::serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + const auto & column_object = assert_cast(column); + const auto & subcolumns = column_object.getSubcolumns(); + + writeChar('{', ostr); + for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) + { + const auto & entry = *it; + if (it != subcolumns.begin()) + writeCString(",", ostr); + + writeDoubleQuoted(entry->path.getPath(), ostr); + writeChar(':', ostr); + serializeTextFromSubcolumn(entry->data, row_num, ostr, settings); + } + writeChar('}', ostr); +} + +template +template +void SerializationObjectDeprecated::serializeTextFromSubcolumn( + const ColumnObjectDeprecated::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & least_common_type = subcolumn.getLeastCommonType(); + + if (subcolumn.isFinalized()) + { + const auto & finalized_column = subcolumn.getFinalizedColumn(); + auto info = least_common_type->getSerializationInfo(finalized_column); + auto serialization = least_common_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(finalized_column, row_num, ostr, settings, indent); + else + serialization->serializeTextJSON(finalized_column, row_num, ostr, settings); + return; + } + + size_t ind = row_num; + if (ind < subcolumn.getNumberOfDefaultsInPrefix()) + { + /// Suboptimal, but it should happen rarely. + auto tmp_column = subcolumn.getLeastCommonType()->createColumn(); + tmp_column->insertDefault(); + + auto info = least_common_type->getSerializationInfo(*tmp_column); + auto serialization = least_common_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(*tmp_column, 0, ostr, settings, indent); + else + serialization->serializeTextJSON(*tmp_column, 0, ostr, settings); + return; + } + + ind -= subcolumn.getNumberOfDefaultsInPrefix(); + for (const auto & part : subcolumn.getData()) + { + if (ind < part->size()) + { + auto part_type = getDataTypeByColumn(*part); + auto info = part_type->getSerializationInfo(*part); + auto serialization = part_type->getSerialization(*info); + if constexpr (pretty_json) + serialization->serializeTextJSONPretty(*part, ind, ostr, settings, indent); + else + serialization->serializeTextJSON(*part, ind, ostr, settings); + return; + } + + ind -= part->size(); + } + + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Index ({}) for text serialization is out of range", row_num); +} + +template +void SerializationObjectDeprecated::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationObjectDeprecated::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeEscapedString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeQuotedString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextJSON(const IColumn & column, size_t 
row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + serializeTextImpl(column, row_num, ostr, settings); +} + +template +void SerializationObjectDeprecated::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeCSVString(ostr_str.str(), ostr); +} + +template +void SerializationObjectDeprecated::serializeTextMarkdown( + const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + if (settings.markdown.escape_special_characters) + { + WriteBufferFromOwnString ostr_str; + serializeTextImpl(column, row_num, ostr_str, settings); + writeMarkdownEscapedString(ostr_str.str(), ostr); + } + else + { + serializeTextEscaped(column, row_num, ostr, settings); + } +} + +template +void SerializationObjectDeprecated::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const auto & column_object = assert_cast(column); + const auto & subcolumns = column_object.getSubcolumns(); + + writeCString("{\n", ostr); + for (auto it = subcolumns.begin(); it != subcolumns.end(); ++it) + { + const auto & entry = *it; + if (it != subcolumns.begin()) + writeCString(",\n", ostr); + + writeChar(' ', (indent + 1) * 4, ostr); + writeDoubleQuoted(entry->path.getPath(), ostr); + writeCString(": ", ostr); + serializeTextFromSubcolumn(entry->data, row_num, ostr, settings, indent + 1); + } + writeChar('\n', ostr); + writeChar(' ', indent * 4, ostr); + writeChar('}', ostr); +} + + +SerializationPtr getObjectSerialization(const String & schema_format) +{ + if (schema_format == "json") + { +#if USE_SIMDJSON + return std::make_shared>>(); +#elif USE_RAPIDJSON + return std::make_shared>>(); +#else + throw Exception(ErrorCodes::NOT_IMPLEMENTED, + "To use data type Object with JSON format ClickHouse should be built with Simdjson or Rapidjson"); +#endif + } + + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unknown schema format '{}'", schema_format); +} + +} diff --git a/src/DataTypes/Serializations/SerializationObjectDeprecated.h b/src/DataTypes/Serializations/SerializationObjectDeprecated.h new file mode 100644 index 00000000000..c209f946850 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDeprecated.h @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/** Serialization for data type Object (deprecated). + * Supported only text serialization/deserialization. + * and binary bulk serialization/deserialization without position independent + * encoding, i.e. serialization/deserialization into Native format. + */ +template +class SerializationObjectDeprecated : public ISerialization +{ +public: + /** In Native format ColumnObjectDeprecated can be serialized + * in two formats: as Tuple or as String. + * The format is the following: + * + * 1 byte -- 0 if Tuple, 1 if String. + * [type_name] -- Only for tuple serialization. + * ... data of internal column ... + * + * ClickHouse client serializazes objects as tuples. + * String serialization exists for clients, which cannot + * do parsing by themselves and they can send raw data as + * string. It will be parsed on the server side. 
+ */ + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + + void serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const override; + void deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const override; + void serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const override; + void deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings &) const override; + + void serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; + void serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextMarkdown(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + + void deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + void deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; + +private: + enum class BinarySerializationKind : UInt8 + { + TUPLE = 0, + STRING = 1, + }; + + struct SerializeStateObject; + struct DeserializeStateObject; + + void deserializeBinaryBulkFromString( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + void deserializeBinaryBulkFromTuple( + ColumnObjectDeprecated & column_object, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeStateObject & state, + SubstreamsCache * cache) const; + + template + void checkSerializationIsSupported(const TSettings & settings) const; + + 
template + void deserializeTextImpl(IColumn & column, Reader && reader) const; + + void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const; + + template + void serializeTextFromSubcolumn(const ColumnObjectDeprecated::Subcolumn & subcolumn, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent = 0) const; + + /// Pool of parser objects to make SerializationObjectDeprecated thread safe. + mutable SimpleObjectPool parsers_pool; +}; + +SerializationPtr getObjectSerialization(const String & schema_format); + +} diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp new file mode 100644 index 00000000000..5323079c54b --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp @@ -0,0 +1,192 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +SerializationObjectDynamicPath::SerializationObjectDynamicPath( + const DB::SerializationPtr & nested_, const String & path_, const String & path_subcolumn_, size_t max_dynamic_types_) + : SerializationWrapper(nested_) + , path(path_) + , path_subcolumn(path_subcolumn_) + , dynamic_serialization(std::make_shared()) + , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , max_dynamic_types(max_dynamic_types_) +{ +} + +struct DeserializeBinaryBulkStateObjectDynamicPath : public ISerialization::DeserializeBinaryBulkState +{ + ISerialization::DeserializeBinaryBulkStatePtr structure_state; + ISerialization::DeserializeBinaryBulkStatePtr nested_state; + bool read_from_shared_data; + ColumnPtr shared_data; +}; + +void SerializationObjectDynamicPath::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + + /// We cannot enumerate anything if we don't have deserialization state, as we don't know the dynamic structure. + if (!deserialize_state) + return; + + settings.path.push_back(Substream::ObjectData); + const auto * structure_state = checkAndGetState(deserialize_state->structure_state); + /// Check if we have our path in dynamic paths. + if (structure_state->dynamic_paths.contains(path)) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(nested_serialization) + .withType(data.type) + .withColumn(data.column) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state->nested_state); + settings.path.back().data = path_data; + nested_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + /// Otherwise we will have to read all shared data and try to find our path there. + else + { + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? 
SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state->nested_state); + settings.path.back().data = shared_data_substream_data; + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + } + + settings.path.pop_back(); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + auto structure_state = SerializationObject::deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; + + auto dynamic_path_state = std::make_shared(); + dynamic_path_state->structure_state = std::move(structure_state); + /// Remember if we need to read from shared data or we have this path in dynamic paths. + dynamic_path_state->read_from_shared_data = !checkAndGetState(dynamic_path_state->structure_state)->dynamic_paths.contains(path); + settings.path.push_back(Substream::ObjectData); + if (dynamic_path_state->read_from_shared_data) + { + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + else + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + + settings.path.pop_back(); + state = std::move(dynamic_path_state); +} + +void SerializationObjectDynamicPath::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationObjectDynamicPath"); +} + +void SerializationObjectDynamicPath::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + if (!state) + return; + + auto * dynamic_path_state = checkAndGetState(state); + settings.path.push_back(Substream::ObjectData); + /// Check if we don't need to read shared data. In this case just read data from dynamic path. + if (!dynamic_path_state->read_from_shared_data) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, dynamic_path_state->nested_state, cache); + settings.path.pop_back(); + } + /// Otherwise, read the whole shared data column and extract requested path from it. 
+ /// TODO: We can read several subcolumns of the same path located in the shared data + /// and right now we extract the whole path column from shared data every time + /// and then extract the requested subcolumns. We can optimize it and use substreams + /// cache here to avoid extracting the same path from shared data several times. + /// + /// TODO: We can change the serialization of shared data to optimize reading paths from it. + /// Right now we cannot know if shared data contains our path in current range or not, + /// but we can change the serialization and write the list of all paths stored in shared + /// data before each granule, and then replace the column that stores paths with column + /// with indexes in this list. It can also reduce the storage, because we will store + /// each path only once and can replace UInt64 string offset column with indexes column + /// that can have smaller type depending on the number of paths in the list. + else + { + settings.path.push_back(Substream::ObjectSharedData); + /// Initialize shared_data column if needed. + if (result_column->empty()) + dynamic_path_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + size_t prev_size = result_column->size(); + shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_path_state->shared_data, limit, settings, dynamic_path_state->nested_state, cache); + /// If we need to read a subcolumn from Dynamic column, create an empty Dynamic column, fill it and extract subcolumn. + MutableColumnPtr dynamic_column = path_subcolumn.empty() ? result_column->assumeMutable() : ColumnDynamic::create(max_dynamic_types)->getPtr(); + /// Check if we don't have any paths in shared data in current range. + const auto & offsets = assert_cast(*dynamic_path_state->shared_data).getOffsets(); + if (offsets.back() == offsets[ssize_t(prev_size) - 1]) + dynamic_column->insertManyDefaults(limit); + else + ColumnObject::fillPathColumnFromSharedData(*dynamic_column, path, dynamic_path_state->shared_data, prev_size, dynamic_path_state->shared_data->size()); + + /// Extract subcolumn from Dynamic column if needed. + if (!path_subcolumn.empty()) + { + auto subcolumn = std::make_shared(max_dynamic_types)->getSubcolumn(path_subcolumn, dynamic_column->getPtr()); + result_column->assumeMutable()->insertRangeFrom(*subcolumn, 0, subcolumn->size()); + } + + settings.path.pop_back(); + } + + settings.path.pop_back(); +} + +} diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.h b/src/DataTypes/Serializations/SerializationObjectDynamicPath.h new file mode 100644 index 00000000000..e11d0cded73 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.h @@ -0,0 +1,58 @@ +#pragma once + +#include + +namespace DB +{ + +/// Serialization of dynamic Object paths. +/// For example, if we have type JSON(a.b UInt32, b.c String) and data {"a" : {"b" : 42}, "b" : {"c" : "Hello}, "c" : {"d" : [1, 2, 3]}, "d" : 42} +/// this class will be responsible for reading dynamic paths 'c.d' and 'd' as subcolumns. +/// Typed paths 'a.b' and 'b.c' are serialized in SerializationObjectTypedPath. 
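For orientation, here is a minimal, hedged sketch (not part of the patch) of what reading a dynamic path "as a subcolumn" looks like from the outside, modeled on the gtest_object_serialization.cpp tests later in this diff. The IDataType::getSubcolumn helper and the behavior of ColumnObject::insert are assumptions based on how subcolumns are used elsewhere in the codebase; the on-disk reads of the same path are what SerializationObjectDynamicPath itself implements.

#include <gtest/gtest.h>
#include <Columns/ColumnObject.h>
#include <Common/assert_cast.h>
#include <Core/Field.h>
#include <DataTypes/DataTypeFactory.h>

using namespace DB;

TEST(ObjectSerializationExample, ReadDynamicPathSubcolumn)
{
    auto type = DataTypeFactory::instance().get("JSON(a.b UInt32, b.c String)");
    auto column = type->createColumn();
    auto & column_object = assert_cast<ColumnObject &>(*column);

    /// "c.d" and "d" are not declared in the type, so they are stored as dynamic
    /// paths (or in shared data once max_dynamic_paths is exhausted).
    column_object.insert(Object{
        {"a.b", Field(42u)}, {"b.c", Field("Hello")}, {"c.d", Array{1, 2, 3}}, {"d", Field(42)}});

    /// In-memory extraction of the dynamic path. When the same subcolumn is read
    /// from a data part, SerializationObjectDynamicPath decides between the
    /// dedicated dynamic-path substream and a scan of the shared data column.
    ColumnPtr immutable = std::move(column);
    auto subcolumn = type->getSubcolumn("c.d", immutable);
    ASSERT_EQ(subcolumn->size(), 1u);
}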
+class SerializationObjectDynamicPath final : public SerializationWrapper +{ +public: + SerializationObjectDynamicPath(const SerializationPtr & nested_, const String & path_, const String & path_subcolumn_, size_t max_dynamic_types_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + +private: + String path; + String path_subcolumn; + SerializationPtr dynamic_serialization; + SerializationPtr shared_data_serialization; + size_t max_dynamic_types; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp b/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp new file mode 100644 index 00000000000..ef086d486f7 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectTypedPath.cpp @@ -0,0 +1,78 @@ +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + + +void SerializationObjectTypedPath::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(nested_serialization) + .withType(data.type) + .withColumn(data.column) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(data.deserialize_state); + nested_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObjectTypedPath::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + 
settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkStatePrefix(settings, state, cache); + settings.path.pop_back(); + settings.path.pop_back(); +} + +void SerializationObjectTypedPath::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationObjectTypedPath"); +} + +void SerializationObjectTypedPath::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + settings.path.push_back(Substream::ObjectData); + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + nested_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, state, cache); + settings.path.pop_back(); + settings.path.pop_back(); +} + +} diff --git a/src/DataTypes/Serializations/SerializationObjectTypedPath.h b/src/DataTypes/Serializations/SerializationObjectTypedPath.h new file mode 100644 index 00000000000..997e14bd145 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationObjectTypedPath.h @@ -0,0 +1,57 @@ +#pragma once + +#include + +namespace DB +{ + +/// Serialization of typed Object paths. +/// For example, for type JSON(a.b UInt32, b.c String) this serialization +/// will be used to read paths 'a.b' and 'b.c' as subcolumns. +class SerializationObjectTypedPath final : public SerializationWrapper +{ +public: + SerializationObjectTypedPath(const SerializationPtr & nested_, const String & path_) + : SerializationWrapper(nested_) + , path(path_) + { + } + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + +private: + String path; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationSubObject.cpp b/src/DataTypes/Serializations/SerializationSubObject.cpp new file mode 100644 index 00000000000..9084d46f9b2 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationSubObject.cpp @@ -0,0 +1,259 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +SerializationSubObject::SerializationSubObject( + const String & path_prefix_, const std::unordered_map & typed_paths_serializations_) + : path_prefix(path_prefix_) + , 
typed_paths_serializations(typed_paths_serializations_) + , dynamic_serialization(std::make_shared()) + , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) +{ +} + +struct DeserializeBinaryBulkStateSubObject : public ISerialization::DeserializeBinaryBulkState +{ + std::unordered_map typed_path_states; + std::unordered_map dynamic_path_states; + std::vector dynamic_paths; + std::vector dynamic_sub_paths; + ISerialization::DeserializeBinaryBulkStatePtr shared_data_state; + ColumnPtr shared_data; +}; + +void SerializationSubObject::enumerateStreams( + DB::ISerialization::EnumerateStreamsSettings & settings, + const DB::ISerialization::StreamCallback & callback, + const DB::ISerialization::SubstreamData & data) const +{ + settings.path.push_back(Substream::ObjectStructure); + callback(settings.path); + settings.path.pop_back(); + + const auto * column_object = data.column ? &assert_cast(*data.column) : nullptr; + const auto * type_object = data.type ? &assert_cast(*data.type) : nullptr; + const auto * deserialize_state = data.deserialize_state ? checkAndGetState(data.deserialize_state) : nullptr; + + settings.path.push_back(Substream::ObjectData); + + /// typed_paths_serializations contains only typed paths with requested prefix from original Object column. + for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(serialization) + .withType(type_object ? type_object->getTypedPaths().at(path.substr(path_prefix.size() + 1)) : nullptr) + .withColumn(column_object ? column_object->getTypedPaths().at(path.substr(path_prefix.size() + 1)) : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->typed_path_states.at(path) : nullptr); + settings.path.back().data = path_data; + serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + + /// We will need to read shared data to find all paths with requested prefix. + settings.path.push_back(Substream::ObjectSharedData); + auto shared_data_substream_data = SubstreamData(shared_data_serialization) + .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); + settings.path.back().data = shared_data_substream_data; + shared_data_serialization->enumerateStreams(settings, callback, shared_data_substream_data); + settings.path.pop_back(); + + /// If deserialize state is provided, enumerate streams for dynamic paths. + if (deserialize_state) + { + DataTypePtr type = std::make_shared(); + for (const auto & [path, state] : deserialize_state->dynamic_path_states) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + auto path_data = SubstreamData(dynamic_serialization) + .withType(type_object ? 
type : nullptr) + .withColumn(nullptr) + .withSerializationInfo(data.serialization_info) + .withDeserializeState(state); + settings.path.back().data = path_data; + dynamic_serialization->enumerateStreams(settings, callback, path_data); + settings.path.pop_back(); + } + } + + settings.path.pop_back(); +} + +void SerializationSubObject::serializeBinaryBulkStatePrefix(const IColumn &, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStatePrefix is not implemented for SerializationSubObject"); +} + +void SerializationSubObject::serializeBinaryBulkStateSuffix(SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception( + ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkStateSuffix is not implemented for SerializationSubObject"); +} + +namespace +{ + +/// Return sub-path by specified prefix. +/// For example, for prefix a.b: +/// a.b.c.d -> c.d, a.b.c -> c +String getSubPath(const String & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +std::string_view getSubPath(const std::string_view & path, const String & prefix) +{ + return path.substr(prefix.size() + 1); +} + +} + +void SerializationSubObject::deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, DeserializeBinaryBulkStatePtr & state, SubstreamsDeserializeStatesCache * cache) const +{ + auto structure_state = SerializationObject::deserializeObjectStructureStatePrefix(settings, cache); + if (!structure_state) + return; + + auto sub_object_state = std::make_shared(); + settings.path.push_back(Substream::ObjectData); + for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + for (const auto & dynamic_path : checkAndGetState(structure_state)->sorted_dynamic_paths) + { + /// Save only dynamic paths with requested prefix. 
+ if (dynamic_path.starts_with(path_prefix) && dynamic_path.size() != path_prefix.size()) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = dynamic_path; + dynamic_serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->dynamic_path_states[dynamic_path], cache); + settings.path.pop_back(); + sub_object_state->dynamic_paths.push_back(dynamic_path); + sub_object_state->dynamic_sub_paths.push_back(getSubPath(dynamic_path, path_prefix)); + } + } + + settings.path.push_back(Substream::ObjectSharedData); + shared_data_serialization->deserializeBinaryBulkStatePrefix(settings, sub_object_state->shared_data_state, cache); + settings.path.pop_back(); + + settings.path.pop_back(); + state = std::move(sub_object_state); +} + +void SerializationSubObject::serializeBinaryBulkWithMultipleStreams(const IColumn &, size_t, size_t, SerializeBinaryBulkSettings &, SerializeBinaryBulkStatePtr &) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method serializeBinaryBulkWithMultipleStreams is not implemented for SerializationSubObject"); +} + +void SerializationSubObject::deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & result_column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const +{ + if (!state) + return; + + auto * sub_object_state = checkAndGetState(state); + auto mutable_column = result_column->assumeMutable(); + auto & column_object = assert_cast(*mutable_column); + /// If it's a new object column, set dynamic paths and statistics. + if (column_object.empty()) + column_object.setDynamicPaths(sub_object_state->dynamic_sub_paths); + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths = column_object.getDynamicPaths(); + + settings.path.push_back(Substream::ObjectData); + for (const auto & [path, serialization] : typed_paths_serializations) + { + settings.path.push_back(Substream::ObjectTypedPath); + settings.path.back().object_path_name = path; + serialization->deserializeBinaryBulkWithMultipleStreams(typed_paths[getSubPath(path, path_prefix)], limit, settings, sub_object_state->typed_path_states[path], cache); + settings.path.pop_back(); + } + + for (const auto & path : sub_object_state->dynamic_paths) + { + settings.path.push_back(Substream::ObjectDynamicPath); + settings.path.back().object_path_name = path; + dynamic_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_paths[getSubPath(path, path_prefix)], limit, settings, sub_object_state->dynamic_path_states[path], cache); + settings.path.pop_back(); + } + + settings.path.push_back(Substream::ObjectSharedData); + /// If it's a new object column, reinitialize column for shared data. + if (result_column->empty()) + sub_object_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + size_t prev_size = column_object.size(); + shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(sub_object_state->shared_data, limit, settings, sub_object_state->shared_data_state, cache); + settings.path.pop_back(); + + auto & sub_object_shared_data = column_object.getSharedDataColumn(); + const auto & offsets = assert_cast(*sub_object_state->shared_data).getOffsets(); + /// Check if there is no data in shared data in current range. 
+ if (offsets.back() == offsets[ssize_t(prev_size) - 1]) + { + sub_object_shared_data.insertManyDefaults(limit); + } + else + { + const auto & shared_data_array = assert_cast(*sub_object_state->shared_data); + const auto & shared_data_offsets = shared_data_array.getOffsets(); + const auto & shared_data_tuple = assert_cast(shared_data_array.getData()); + const auto & shared_data_paths = assert_cast(shared_data_tuple.getColumn(0)); + const auto & shared_data_values = assert_cast(shared_data_tuple.getColumn(1)); + + auto & sub_object_data_offsets = column_object.getSharedDataOffsets(); + auto [sub_object_shared_data_paths, sub_object_shared_data_values] = column_object.getSharedDataPathsAndValues(); + StringRef prefix_ref(path_prefix); + for (size_t i = prev_size; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[ssize_t(i) - 1]; + size_t end = shared_data_offsets[ssize_t(i)]; + size_t lower_bound_index = ColumnObject::findPathLowerBoundInSharedData(prefix_ref, shared_data_paths, start, end); + for (; lower_bound_index != end; ++lower_bound_index) + { + auto path = shared_data_paths.getDataAt(lower_bound_index).toView(); + if (!path.starts_with(path_prefix)) + break; + + /// Don't include path that is equal to the prefix. + if (path.size() != path_prefix.size()) + { + auto sub_path = getSubPath(path, path_prefix); + sub_object_shared_data_paths->insertData(sub_path.data(), sub_path.size()); + sub_object_shared_data_values->insertFrom(shared_data_values, lower_bound_index); + } + } + sub_object_data_offsets.push_back(sub_object_shared_data_paths->size()); + } + } + settings.path.pop_back(); +} + +} diff --git a/src/DataTypes/Serializations/SerializationSubObject.h b/src/DataTypes/Serializations/SerializationSubObject.h new file mode 100644 index 00000000000..10973b48957 --- /dev/null +++ b/src/DataTypes/Serializations/SerializationSubObject.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; +} + +/// Serialization of a sub-object Object subcolumns. +/// For example, if we have type JSON and data {"a" : {"b" : {"c" : 42, "d" : "Hello"}}, "c" : [1, 2, 3], "d" : 42} +/// this class will be responsible for reading sub-object a.b and will read JSON column with data {"c" : 43, "d" : "Hello"}. 
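The prefix handling this class relies on (getSubPath() plus the starts_with()/size() check used during deserialization above) can be shown in isolation. The snippet below is a standalone C++20 sketch with made-up sample paths, not code from the patch; it only mirrors how paths under a prefix are selected and how the prefix is stripped.

#include <iostream>
#include <string>
#include <vector>

/// Keep only paths strictly under `prefix` and strip "prefix." from them,
/// mirroring getSubPath() and the starts_with()/size() condition in SerializationSubObject.cpp.
std::vector<std::string> collectSubPaths(const std::vector<std::string> & paths, const std::string & prefix)
{
    std::vector<std::string> result;
    for (const auto & path : paths)
        if (path.starts_with(prefix) && path.size() != prefix.size())
            result.push_back(path.substr(prefix.size() + 1));
    return result;
}

int main()
{
    /// For prefix "a.b": "a.b.c" -> "c", "a.b.c.d" -> "c.d"; "a.b" itself and "a.c" are skipped.
    for (const auto & sub : collectSubPaths({"a.b.c", "a.b.c.d", "a.b", "a.c"}, "a.b"))
        std::cout << sub << '\n';
}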
+class SerializationSubObject final : public SimpleTextSerialization +{ +public: + SerializationSubObject(const String & path_prefix_, const std::unordered_map & typed_paths_serializations_); + + void enumerateStreams( + EnumerateStreamsSettings & settings, + const StreamCallback & callback, + const SubstreamData & data) const override; + + void serializeBinaryBulkStatePrefix( + const IColumn & column, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void serializeBinaryBulkStateSuffix( + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkStatePrefix( + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsDeserializeStatesCache * cache) const override; + + void serializeBinaryBulkWithMultipleStreams( + const IColumn & column, + size_t offset, + size_t limit, + SerializeBinaryBulkSettings & settings, + SerializeBinaryBulkStatePtr & state) const override; + + void deserializeBinaryBulkWithMultipleStreams( + ColumnPtr & column, + size_t limit, + DeserializeBinaryBulkSettings & settings, + DeserializeBinaryBulkStatePtr & state, + SubstreamsCache * cache) const override; + + void serializeBinary(const Field &, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeBinary(Field &, ReadBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void serializeBinary(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeBinary(IColumn &, ReadBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void serializeText(const IColumn &, size_t, WriteBuffer &, const FormatSettings &) const override { throwNoSerialization(); } + void deserializeText(IColumn &, ReadBuffer &, const FormatSettings &, bool) const override { throwNoSerialization(); } + bool tryDeserializeText(IColumn &, ReadBuffer &, const FormatSettings &, bool) const override { throwNoSerialization(); } + +private: + [[noreturn]] static void throwNoSerialization() + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Text/binary serialization is not implemented for object sub-object subcolumn"); + } + + String path_prefix; + std::unordered_map typed_paths_serializations; + SerializationPtr dynamic_serialization; + SerializationPtr shared_data_serialization; +}; + +} diff --git a/src/DataTypes/Serializations/SerializationVariant.cpp b/src/DataTypes/Serializations/SerializationVariant.cpp index e4d71e84cc7..0f6a17ef167 100644 --- a/src/DataTypes/Serializations/SerializationVariant.cpp +++ b/src/DataTypes/Serializations/SerializationVariant.cpp @@ -218,7 +218,8 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state, - std::unordered_map & variants_statistics) const + std::unordered_map & variants_statistics, + size_t & total_size_of_variants) const { const ColumnVariant & col = assert_cast(column); if (const size_t size = col.size(); limit == 0 || offset + limit > size) @@ -265,6 +266,7 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian /// We can use the same offset/limit as for whole Variant column variants[non_empty_global_discr]->serializeBinaryBulkWithMultipleStreams(col.getVariantByGlobalDiscriminator(non_empty_global_discr), offset, limit, settings, 
variant_state->variant_states[non_empty_global_discr]); variants_statistics[variant_names[non_empty_global_discr]] += limit; + total_size_of_variants += limit; settings.path.pop_back(); settings.path.pop_back(); return; @@ -315,7 +317,9 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian { addVariantElementToPath(settings.path, i); variants[i]->serializeBinaryBulkWithMultipleStreams(col.getVariantByGlobalDiscriminator(i), 0, 0, settings, variant_state->variant_states[i]); - variants_statistics[variant_names[i]] += col.getVariantByGlobalDiscriminator(i).size(); + size_t variant_size = col.getVariantByGlobalDiscriminator(i).size(); + variants_statistics[variant_names[i]] += variant_size; + total_size_of_variants += variant_size; settings.path.pop_back(); } settings.path.pop_back(); @@ -386,6 +390,7 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreamsAndUpdateVarian settings, variant_state->variant_states[i]); variants_statistics[variant_names[i]] += variant_offsets_and_limits[i].second; + total_size_of_variants += variant_offsets_and_limits[i].second; settings.path.pop_back(); } } @@ -400,7 +405,8 @@ void SerializationVariant::serializeBinaryBulkWithMultipleStreams( DB::ISerialization::SerializeBinaryBulkStatePtr & state) const { std::unordered_map tmp_statistics; - serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(column, offset, limit, settings, state, tmp_statistics); + size_t tmp_size; + serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(column, offset, limit, settings, state, tmp_statistics, tmp_size); } void SerializationVariant::deserializeBinaryBulkWithMultipleStreams( @@ -1068,6 +1074,16 @@ void SerializationVariant::serializeTextJSON(const IColumn & column, size_t row_ variants[global_discr]->serializeTextJSON(col.getVariantByGlobalDiscriminator(global_discr), col.offsetAt(row_num), ostr, settings); } +void SerializationVariant::serializeTextJSONPretty(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const +{ + const ColumnVariant & col = assert_cast(column); + auto global_discr = col.globalDiscriminatorAt(row_num); + if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) + SerializationNullable::serializeNullJSON(ostr); + else + variants[global_discr]->serializeTextJSONPretty(col.getVariantByGlobalDiscriminator(global_discr), col.offsetAt(row_num), ostr, settings, indent); +} + bool SerializationVariant::tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const { String field; diff --git a/src/DataTypes/Serializations/SerializationVariant.h b/src/DataTypes/Serializations/SerializationVariant.h index af89632cf81..a76a211e897 100644 --- a/src/DataTypes/Serializations/SerializationVariant.h +++ b/src/DataTypes/Serializations/SerializationVariant.h @@ -113,7 +113,8 @@ public: size_t limit, SerializeBinaryBulkSettings & settings, SerializeBinaryBulkStatePtr & state, - std::unordered_map & variants_statistics) const; + std::unordered_map & variants_statistics, + size_t & total_size_of_variants) const; void deserializeBinaryBulkWithMultipleStreams( ColumnPtr & column, @@ -145,6 +146,7 @@ public: bool tryDeserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; void serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const override; + void serializeTextJSONPretty(const IColumn & column, size_t row_num, 
WriteBuffer & ostr, const FormatSettings & settings, size_t indent) const override; void deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; bool tryDeserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const override; diff --git a/src/DataTypes/Serializations/SerializationVariantElement.cpp b/src/DataTypes/Serializations/SerializationVariantElement.cpp index 36dc85f60ee..9ad183a159e 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.cpp +++ b/src/DataTypes/Serializations/SerializationVariantElement.cpp @@ -193,16 +193,6 @@ void SerializationVariantElement::deserializeBinaryBulkWithMultipleStreams( nested_serialization->deserializeBinaryBulkWithMultipleStreams(variant_element_state->variant, *variant_limit, settings, variant_element_state->variant_element_state, cache); removeVariantFromPath(settings.path); - /// If nothing was deserialized when variant_limit > 0 - /// it means that we don't have a stream for such sub-column. - /// It may happen during ALTER MODIFY column with Variant extension. - /// In this case we should just insert default values. - if (variant_element_state->variant->empty()) - { - mutable_column->insertManyDefaults(num_new_discriminators); - return; - } - /// If there was nothing to deserialize or nothing was actually deserialized when variant_limit > 0, just insert defaults. /// The second case means that we don't have a stream for such sub-column. It may happen during ALTER MODIFY column with Variant extension. if (variant_limit == 0 || variant_element_state->variant->empty()) diff --git a/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp new file mode 100644 index 00000000000..ec53df18297 --- /dev/null +++ b/src/DataTypes/Serializations/tests/gtest_deprecated_object_serialization.cpp @@ -0,0 +1,80 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if USE_SIMDJSON + +using namespace DB; + +TEST(SerializationObjectDeprecated, FromString) +{ + WriteBufferFromOwnString out; + + auto column_string = ColumnString::create(); + column_string->insert(R"({"k1" : 1, "k2" : [{"k3" : "aa", "k4" : 2}, {"k3": "bb", "k4": 3}]})"); + column_string->insert(R"({"k1" : 2, "k2" : [{"k3" : "cc", "k5" : 4}, {"k4": 5}, {"k4": 6}]})"); + + { + auto serialization = std::make_shared(); + + ISerialization::SerializeBinaryBulkSettings settings; + ISerialization::SerializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&out](const auto &) { return &out; }; + + writeIntBinary(static_cast(1), out); + serialization->serializeBinaryBulkStatePrefix(*column_string, settings, state); + serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); + serialization->serializeBinaryBulkStateSuffix(settings, state); + } + + auto type_object = std::make_shared("json", false); + ColumnPtr result_column = type_object->createColumn(); + + ReadBufferFromOwnString in(out.str()); + + { + auto serialization = type_object->getDefaultSerialization(); + + ISerialization::DeserializeBinaryBulkSettings settings; + ISerialization::DeserializeBinaryBulkStatePtr state; + settings.position_independent_encoding = false; + settings.getter = [&in](const auto &) { return ∈ }; + + serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); + 
serialization->deserializeBinaryBulkWithMultipleStreams(result_column, column_string->size(), settings, state, nullptr); + } + + auto & column_object = assert_cast(*result_column->assumeMutable()); + column_object.finalize(); + + ASSERT_TRUE(column_object.size() == 2); + ASSERT_TRUE(column_object.getSubcolumns().size() == 4); + + auto check_subcolumn = [&](const auto & name, const auto & type_name, const std::vector & expected) + { + const auto & subcolumn = column_object.getSubcolumn(PathInData{name}); + ASSERT_EQ(subcolumn.getLeastCommonType()->getName(), type_name); + + const auto & data = subcolumn.getFinalizedColumn(); + for (size_t i = 0; i < expected.size(); ++i) + ASSERT_EQ( + applyVisitor(FieldVisitorToString(), data[i]), + applyVisitor(FieldVisitorToString(), expected[i])); + }; + + check_subcolumn("k1", "Int8", {1, 2}); + check_subcolumn("k2.k3", "Array(String)", {Array{"aa", "bb"}, Array{"cc", "", ""}}); + check_subcolumn("k2.k4", "Array(Int8)", {Array{2, 3}, Array{0, 5, 6}}); + check_subcolumn("k2.k5", "Array(Int8)", {Array{0, 0}, Array{4, 0, 0}}); +} + +#endif diff --git a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp index c6337a31fce..f104b75af9b 100644 --- a/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp +++ b/src/DataTypes/Serializations/tests/gtest_object_serialization.cpp @@ -1,80 +1,98 @@ -#include -#include -#include -#include -#include -#include #include -#include -#include +#include +#include +#include #include -#if USE_SIMDJSON - using namespace DB; -TEST(SerializationObject, FromString) +TEST(ObjectSerialization, FieldBinarySerialization) { - WriteBufferFromOwnString out; - - auto column_string = ColumnString::create(); - column_string->insert(R"({"k1" : 1, "k2" : [{"k3" : "aa", "k4" : 2}, {"k3": "bb", "k4": 3}]})"); - column_string->insert(R"({"k1" : 2, "k2" : [{"k3" : "cc", "k5" : 4}, {"k4": 5}, {"k4": 6}]})"); - - { - auto serialization = std::make_shared(); - - ISerialization::SerializeBinaryBulkSettings settings; - ISerialization::SerializeBinaryBulkStatePtr state; - settings.position_independent_encoding = false; - settings.getter = [&out](const auto &) { return &out; }; - - writeIntBinary(static_cast(1), out); - serialization->serializeBinaryBulkStatePrefix(*column_string, settings, state); - serialization->serializeBinaryBulkWithMultipleStreams(*column_string, 0, column_string->size(), settings, state); - serialization->serializeBinaryBulkStateSuffix(settings, state); - } - - auto type_object = std::make_shared("json", false); - ColumnPtr result_column = type_object->createColumn(); - - ReadBufferFromOwnString in(out.str()); - - { - auto serialization = type_object->getDefaultSerialization(); - - ISerialization::DeserializeBinaryBulkSettings settings; - ISerialization::DeserializeBinaryBulkStatePtr state; - settings.position_independent_encoding = false; - settings.getter = [&in](const auto &) { return ∈ }; - - serialization->deserializeBinaryBulkStatePrefix(settings, state, nullptr); - serialization->deserializeBinaryBulkWithMultipleStreams(result_column, column_string->size(), settings, state, nullptr); - } - - auto & column_object = assert_cast(*result_column->assumeMutable()); - column_object.finalize(); - - ASSERT_TRUE(column_object.size() == 2); - ASSERT_TRUE(column_object.getSubcolumns().size() == 4); - - auto check_subcolumn = [&](const auto & name, const auto & type_name, const std::vector & expected) - { - const auto & subcolumn = 
column_object.getSubcolumn(PathInData{name}); - ASSERT_EQ(subcolumn.getLeastCommonType()->getName(), type_name); - - const auto & data = subcolumn.getFinalizedColumn(); - for (size_t i = 0; i < expected.size(); ++i) - ASSERT_EQ( - applyVisitor(FieldVisitorToString(), data[i]), - applyVisitor(FieldVisitorToString(), expected[i])); - }; - - check_subcolumn("k1", "Int8", {1, 2}); - check_subcolumn("k2.k3", "Array(String)", {Array{"aa", "bb"}, Array{"cc", "", ""}}); - check_subcolumn("k2.k4", "Array(Int8)", {Array{2, 3}, Array{0, 5, 6}}); - check_subcolumn("k2.k5", "Array(Int8)", {Array{0, 0}, Array{4, 0, 0}}); + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + Object object1 = Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}; + WriteBufferFromOwnString ostr; + serialization->serializeBinary(object1, ostr, FormatSettings()); + ReadBufferFromString istr(ostr.str()); + Field object2; + serialization->deserializeBinary(object2, istr, FormatSettings()); + ASSERT_EQ(object1, object2.safeGet()); } -#endif + +TEST(ObjectSerialization, ColumnBinarySerialization) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}); + WriteBufferFromOwnString ostr1; + serialization->serializeBinary(col_object, 0, ostr1, FormatSettings()); + ReadBufferFromString istr1(ostr1.str()); + serialization->deserializeBinary(col_object, istr1, FormatSettings()); + ASSERT_EQ(col_object[0], col_object[1]); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.e", Field(42)}, {"b.d", Field(42)}, {"b.e", Tuple{Field(43), "Str3"}}, {"b.g", Field("Str4")}}); + WriteBufferFromOwnString ostr2; + serialization->serializeBinary(col_object, 2, ostr2, FormatSettings()); + ReadBufferFromString istr2(ostr2.str()); + serialization->deserializeBinary(col_object, istr2, FormatSettings()); + ASSERT_EQ(col_object[2], col_object[3]); +} + +TEST(ObjectSerialization, JSONSerialization) +{ + auto type = DataTypeFactory::instance().get("JSON(max_dynamic_types=10, max_dynamic_paths=2, a.b UInt32, a.c Array(String))"); + auto serialization = type->getDefaultSerialization(); + auto col = type->createColumn(); + auto & col_object = assert_cast(*col); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a.d", Field(42)}, {"a.e", Tuple{Field(43), "Str3"}}}); + col_object.insert(Object{{"a.c", Array{"Str1", "Str2"}}, {"a", Tuple{Field(43), "Str3"}}, {"a.b.c", Field(42)}, {"a.b.e", Field(43)}, {"b.c.d.e", Field(42)}, {"b.c.d.g", Field(43)}, {"b.c.h.r", Field(44)}, {"c.g.h.t", Array{Field("Str"), Field("Str2")}}, {"h", Field("Str")}, {"j", Field("Str")}}); + WriteBufferFromOwnString buf1; + serialization->serializeTextJSON(col_object, 1, buf1, FormatSettings()); + ASSERT_EQ(buf1.str(), R"({"a":[43,"Str3"],"a":{"b":0,"b":{"c":42,"e":43},"c":["Str1","Str2"]},"b":{"c":{"d":{"e":42,"g":43},"h":{"r":44}}},"c":{"g":{"h":{"t":["Str","Str2"]}}},"h":"Str","j":"Str"})"); + WriteBufferFromOwnString buf2; + serialization->serializeTextJSONPretty(col_object, 1, buf2, FormatSettings(), 0); + ASSERT_EQ(buf2.str(), R"({ + "a" : [ + 43, + "Str3" + ], + "a" : { + 
"b" : 0, + "b" : { + "c" : 42, + "e" : 43 + }, + "c" : [ + "Str1", + "Str2" + ] + }, + "b" : { + "c" : { + "d" : { + "e" : 42, + "g" : 43 + }, + "h" : { + "r" : 44 + } + } + }, + "c" : { + "g" : { + "h" : { + "t" : [ + "Str", + "Str2" + ] + } + } + }, + "h" : "Str", + "j" : "Str" +})"); + +} diff --git a/src/DataTypes/Utils.cpp b/src/DataTypes/Utils.cpp index e7e69e379af..a6e9452d7ef 100644 --- a/src/DataTypes/Utils.cpp +++ b/src/DataTypes/Utils.cpp @@ -216,6 +216,7 @@ bool canBeSafelyCasted(const DataTypePtr & from_type, const DataTypePtr & to_typ return false; } case TypeIndex::String: + case TypeIndex::ObjectDeprecated: case TypeIndex::Object: case TypeIndex::Set: case TypeIndex::Interval: diff --git a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp index 4d0bfc67183..789aeac566f 100644 --- a/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp +++ b/src/DataTypes/tests/gtest_data_types_binary_encoding.cpp @@ -126,4 +126,7 @@ GTEST_TEST(DataTypesBinaryEncoding, EncodeAndDecode) check(DataTypeFactory::instance().get("Polygon")); check(DataTypeFactory::instance().get("MultiPolygon")); check(DataTypeFactory::instance().get("Tuple(Map(LowCardinality(String), Array(AggregateFunction(2, quantiles(0.1, 0.2), Float32))), Array(Array(Tuple(UInt32, Tuple(a Map(String, String), b Nullable(Date), c Variant(Tuple(g String, d Array(UInt32)), Date, Map(String, String)))))))")); + check(DataTypeFactory::instance().get("JSON")); + check(DataTypeFactory::instance().get("JSON(max_dynamic_paths=10)")); + check(DataTypeFactory::instance().get("JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, SKIP a.c, b.g String, SKIP l.d.f)")); } diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 3fb6d30fcb8..2ccdd8510a8 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -52,7 +52,7 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) + iterateMetadataFiles([this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 41cfb751141..aeac130594f 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -12,7 +12,7 @@ class DatabaseLazyIterator; class Context; /** Lazy engine of databases. - * Works like DatabaseOrdinary, but stores in memory only the cache. + * Works like DatabaseOrdinary, but stores only recently accessed tables in memory. * Can be used only with *Log engines. 
*/ class DatabaseLazy final : public DatabaseOnDisk diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 734f354d9a5..81378fc1c64 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -504,7 +504,7 @@ void DatabaseOnDisk::renameTable( } -/// It returns create table statement (even if table is detached) +/// It returns the create table statement (even if table is detached) ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, ContextPtr, bool throw_on_error) const { ASTPtr ast; @@ -568,14 +568,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(local_context->getPath() + getDataPath()); + (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(local_context->getPath() + getDataPath()); + (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -613,7 +613,7 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const { auto process_tmp_drop_metadata_file = [&](const String & file_name) { @@ -621,7 +621,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) + if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); @@ -638,7 +638,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it) + for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. 
diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 12656068643..ffc95a7c128 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } + String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -83,7 +83,7 @@ protected: using IteratingFunction = std::function; - void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const; + void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const; ASTPtr getCreateTableQueryImpl( const String & table_name, diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 8808261654f..dd8a3f42ea8 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_) + : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) { } @@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } }; - iterateMetadataFiles(local_context, process_metadata); + iterateMetadataFiles(process_metadata); size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count; size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries; diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 85217ba5c04..8e3378bcc12 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -1142,38 +1143,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep /// We will execute some CREATE queries for recovery (not ATTACH queries), /// so we need to allow experimental features that can be used in a CREATE query - query_context->setSetting("allow_experimental_inverted_index", 1); - query_context->setSetting("allow_experimental_full_text_index", 1); - query_context->setSetting("allow_experimental_codecs", 1); - query_context->setSetting("allow_experimental_live_view", 1); - query_context->setSetting("allow_experimental_window_view", 1); - query_context->setSetting("allow_experimental_funnel_functions", 1); - query_context->setSetting("allow_experimental_nlp_functions", 1); - query_context->setSetting("allow_experimental_hash_functions", 1); - query_context->setSetting("allow_experimental_object_type", 1); - query_context->setSetting("allow_experimental_variant_type", 1); - 
query_context->setSetting("allow_experimental_dynamic_type", 1); - query_context->setSetting("allow_experimental_vector_similarity_index", 1); - query_context->setSetting("allow_experimental_bigint_types", 1); - query_context->setSetting("allow_experimental_window_functions", 1); - query_context->setSetting("allow_experimental_geo_types", 1); - query_context->setSetting("allow_experimental_map_type", 1); - query_context->setSetting("allow_deprecated_error_prone_window_functions", 1); - - query_context->setSetting("allow_suspicious_low_cardinality_types", 1); - query_context->setSetting("allow_suspicious_fixed_string_types", 1); - query_context->setSetting("allow_suspicious_indices", 1); - query_context->setSetting("allow_suspicious_codecs", 1); - query_context->setSetting("allow_hyperscan", 1); - query_context->setSetting("allow_simdjson", 1); - query_context->setSetting("allow_deprecated_syntax_for_merge_tree", 1); - query_context->setSetting("allow_suspicious_primary_key", 1); - query_context->setSetting("allow_suspicious_ttl_expressions", 1); - query_context->setSetting("allow_suspicious_variant_types", 1); - query_context->setSetting("enable_deflate_qpl_codec", 1); - query_context->setSetting("enable_zstd_qat_codec", 1); - query_context->setSetting("allow_create_index_without_type", 1); - query_context->setSetting("allow_experimental_s3queue", 1); + enableAllExperimentalSettings(query_context); auto txn = std::make_shared(current_zookeeper, zookeeper_path, false, ""); query_context->initZooKeeperMetadataTransaction(txn); diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 801356b3dd7..495733e15fd 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,6 +14,8 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) @@ -124,6 +126,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } +void DatabasesOverlay::renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) +{ + for (auto & db : databases) + { + if (db->isTableExist(name, current_context)) + { + if (DatabasesOverlay * to_overlay_database = typeid_cast(&to_database)) + { + /// Renaming from Overlay database inside itself or into another Overlay database. + /// Just use the first database in the overlay as a destination. + if (to_overlay_database->databases.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); + + db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); + } + else + { + /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. 
+ db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); + } + + return; + } + } + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); +} + ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; @@ -178,6 +213,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } +UUID DatabasesOverlay::getUUID() const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->getUUID(); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index b0c7e7e4032..40c653e5cb5 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,12 +35,21 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; + void renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) override; + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; + UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 943f3ae502e..b9fd9c325f8 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -196,7 +196,7 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( } else { - std::tuple row; + std::tuple row; while (stream >> row) { const auto column_name = std::get<0>(row); @@ -206,13 +206,14 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( std::get<3>(row)); columns.push_back(NameAndTypePair(column_name, data_type)); - auto attgenerated = std::get<6>(row); + auto attgenerated = std::get<7>(row); attributes.emplace( column_name, PostgreSQLTableStructure::PGAttribute{ .atttypid = parse(std::get<4>(row)), .atttypmod = parse(std::get<5>(row)), + .attnum = parse(std::get<6>(row)), .atthasdef = false, .attgenerated = attgenerated.empty() ? 
char{} : char(attgenerated[0]), .attr_def = {} @@ -308,6 +309,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "attndims AS dims, " /// array dimensions "atttypid as type_id, " "atttypmod as type_modifier, " + "attnum as att_num, " "attgenerated as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) " @@ -338,17 +340,29 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "WHERE adrelid = (SELECT oid FROM pg_class WHERE {});", where); pqxx::result result{tx.exec(attrdef_query)}; - for (const auto row : result) + if (static_cast(result.size()) > table.physical_columns->names.size()) { - size_t adnum = row[0].as(); - if (!adnum || adnum > table.physical_columns->names.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Received {} attrdef, but currently fetched columns list has {} columns", + result.size(), table.physical_columns->attributes.size()); + } + + for (const auto & column_attrs : table.physical_columns->attributes) + { + if (column_attrs.second.attgenerated != 's') /// e.g. not a generated column { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Received adnum {}, but currently fetched columns list has {} columns", - adnum, table.physical_columns->attributes.size()); + continue; + } + + for (const auto row : result) + { + int adnum = row[0].as(); + if (column_attrs.second.attnum == adnum) + { + table.physical_columns->attributes.at(column_attrs.first).attr_def = row[1].as(); + break; + } } - const auto column_name = table.physical_columns->names[adnum - 1]; - table.physical_columns->attributes.at(column_name).attr_def = row[1].as(); } } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 81bf7b278fc..25ece6909fd 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -16,6 +16,7 @@ struct PostgreSQLTableStructure { Int32 atttypid; Int32 atttypmod; + Int32 attnum; bool atthasdef; char attgenerated; std::string attr_def; diff --git a/src/Databases/TablesLoader.h b/src/Databases/TablesLoader.h index 26b5777f1a9..bf469d83245 100644 --- a/src/Databases/TablesLoader.h +++ b/src/Databases/TablesLoader.h @@ -1,4 +1,5 @@ #pragma once + #include #include #include diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp new file mode 100644 index 00000000000..9abe05d7bce --- /dev/null +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -0,0 +1,49 @@ +#include + +namespace DB +{ + +/* + * Enables all settings that allow the use of experimental, deprecated, or potentially unsafe features + * in a CREATE query. This function is used in DatabaseReplicated::recoverLostReplica() to create tables + * when the original settings used to create the table are not available. 
+ */ + +void enableAllExperimentalSettings(ContextMutablePtr context) +{ + context->setSetting("allow_experimental_inverted_index", 1); + context->setSetting("allow_experimental_full_text_index", 1); + context->setSetting("allow_experimental_codecs", 1); + context->setSetting("allow_experimental_live_view", 1); + context->setSetting("allow_experimental_window_view", 1); + context->setSetting("allow_experimental_funnel_functions", 1); + context->setSetting("allow_experimental_nlp_functions", 1); + context->setSetting("allow_experimental_hash_functions", 1); + context->setSetting("allow_experimental_object_type", 1); + context->setSetting("allow_experimental_variant_type", 1); + context->setSetting("allow_experimental_dynamic_type", 1); + context->setSetting("allow_experimental_json_type", 1); + context->setSetting("allow_experimental_vector_similarity_index", 1); + context->setSetting("allow_experimental_bigint_types", 1); + context->setSetting("allow_experimental_window_functions", 1); + context->setSetting("allow_experimental_geo_types", 1); + context->setSetting("allow_experimental_map_type", 1); + context->setSetting("allow_deprecated_error_prone_window_functions", 1); + + context->setSetting("allow_suspicious_low_cardinality_types", 1); + context->setSetting("allow_suspicious_fixed_string_types", 1); + context->setSetting("allow_suspicious_indices", 1); + context->setSetting("allow_suspicious_codecs", 1); + context->setSetting("allow_hyperscan", 1); + context->setSetting("allow_simdjson", 1); + context->setSetting("allow_deprecated_syntax_for_merge_tree", 1); + context->setSetting("allow_suspicious_primary_key", 1); + context->setSetting("allow_suspicious_ttl_expressions", 1); + context->setSetting("allow_suspicious_variant_types", 1); + context->setSetting("enable_deflate_qpl_codec", 1); + context->setSetting("enable_zstd_qat_codec", 1); + context->setSetting("allow_create_index_without_type", 1); + context->setSetting("allow_experimental_s3queue", 1); +} + +} diff --git a/src/Databases/enableAllExperimentalSettings.h b/src/Databases/enableAllExperimentalSettings.h new file mode 100644 index 00000000000..ec3bfb98843 --- /dev/null +++ b/src/Databases/enableAllExperimentalSettings.h @@ -0,0 +1,15 @@ +#pragma once +#include + +namespace DB +{ + +/* + * Enables all settings that allow the use of experimental, deprecated, or potentially unsafe features + * in a CREATE query. This function is used in DatabaseReplicated::recoverLostReplica() to create tables + * when the original settings used to create the table are not available. 
+ */ + +void enableAllExperimentalSettings(ContextMutablePtr context); + +} diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index bf16f315ddf..b36d53a6159 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -51,6 +51,8 @@ namespace configuration.db, configuration.user, configuration.password, + configuration.proto_send_chunked, + configuration.proto_recv_chunked, configuration.quota_key, "", /* cluster */ "", /* cluster_secret */ @@ -222,7 +224,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) { validateNamedCollection( *named_collection, {}, ValidateKeysMultiset{ - "secure", "host", "hostname", "port", "user", "username", "password", "quota_key", "name", + "secure", "host", "hostname", "port", "user", "username", "password", "proto_send_chunked", "proto_recv_chunked", "quota_key", "name", "db", "database", "table","query", "where", "invalidate_query", "update_field", "update_lag"}); const auto secure = named_collection->getOrDefault("secure", false); @@ -234,6 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = named_collection->getAnyOrDefault({"user", "username"}, "default"), .password = named_collection->getOrDefault("password", ""), + .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "notchunked"), + .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "notchunked"), .quota_key = named_collection->getOrDefault("quota_key", ""), .db = named_collection->getAnyOrDefault({"db", "database"}, default_database), .table = named_collection->getOrDefault("table", ""), @@ -258,6 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = config.getString(settings_config_prefix + ".user", "default"), .password = config.getString(settings_config_prefix + ".password", ""), + .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked"), + .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "notchunked"), .quota_key = config.getString(settings_config_prefix + ".quota_key", ""), .db = config.getString(settings_config_prefix + ".db", default_database), .table = config.getString(settings_config_prefix + ".table", ""), diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index 3357514eab2..faf9e5f8009 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -23,6 +23,8 @@ public: const std::string host; const std::string user; const std::string password; + const std::string proto_send_chunked; + const std::string proto_recv_chunked; const std::string quota_key; const std::string db; const std::string table; diff --git a/src/Dictionaries/HTTPDictionarySource.cpp b/src/Dictionaries/HTTPDictionarySource.cpp index 663c63dd6c6..bf19f912723 100644 --- a/src/Dictionaries/HTTPDictionarySource.cpp +++ b/src/Dictionaries/HTTPDictionarySource.cpp @@ -8,12 +8,12 @@ #include #include #include -#include #include #include #include "DictionarySourceFactory.h" #include "DictionarySourceHelpers.h" #include "DictionaryStructure.h" +#include #include "registerDictionaries.h" @@ -223,21 +223,23 @@ void registerDictionarySourceHTTP(DictionarySourceFactory & factory) String endpoint; String format; - auto named_collection = created_from_ddl - ? 
getURLBasedDataSourceConfiguration(config, settings_config_prefix, global_context) - : std::nullopt; + auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr; if (named_collection) { - url = named_collection->configuration.url; - endpoint = named_collection->configuration.endpoint; - format = named_collection->configuration.format; + validateNamedCollection( + *named_collection, + /* required_keys */{}, + /* optional_keys */ValidateKeysMultiset{ + "url", "endpoint", "user", "credentials.user", "password", "credentials.password", "format", "compression_method", "structure", "name"}); - credentials.setUsername(named_collection->configuration.user); - credentials.setPassword(named_collection->configuration.password); + url = named_collection->getOrDefault("url", ""); + endpoint = named_collection->getOrDefault("endpoint", ""); + format = named_collection->getOrDefault("format", ""); - header_entries.reserve(named_collection->configuration.headers.size()); - for (const auto & [key, value] : named_collection->configuration.headers) - header_entries.emplace_back(key, value); + credentials.setUsername(named_collection->getAnyOrDefault({"user", "credentials.user"}, "")); + credentials.setPassword(named_collection->getAnyOrDefault({"password", "credentials.password"}, "")); + + header_entries = getHeadersFromNamedCollection(*named_collection); } else { diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index c30a6f90e44..7bacfdab3d2 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -1,15 +1,12 @@ #include "MongoDBDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include #include +#include namespace DB { -static const std::unordered_set dictionary_allowed_keys = { - "host", "port", "user", "password", "db", "database", "uri", "collection", "name", "method", "options"}; - void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) { auto create_mongo_db_dictionary = []( @@ -22,35 +19,53 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory) bool created_from_ddl) { const auto config_prefix = root_config_prefix + ".mongodb"; - ExternalDataSourceConfiguration configuration; - auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key); }; - auto named_collection = getExternalDataSourceConfiguration(config, config_prefix, context, has_config_key); + auto named_collection = created_from_ddl ? 
tryGetNamedCollectionWithOverrides(config, config_prefix, context) : nullptr; + + String host, username, password, database, method, options, collection; + UInt16 port; if (named_collection) { - configuration = named_collection->configuration; + validateNamedCollection( + *named_collection, + /* required_keys */{"collection"}, + /* optional_keys */ValidateKeysMultiset{ + "host", "port", "user", "password", "db", "database", "uri", "name", "method", "options"}); + + host = named_collection->getOrDefault("host", ""); + port = static_cast(named_collection->getOrDefault("port", 0)); + username = named_collection->getOrDefault("user", ""); + password = named_collection->getOrDefault("password", ""); + database = named_collection->getAnyOrDefault({"db", "database"}, ""); + method = named_collection->getOrDefault("method", ""); + collection = named_collection->getOrDefault("collection", ""); + options = named_collection->getOrDefault("options", ""); } else { - configuration.host = config.getString(config_prefix + ".host", ""); - configuration.port = config.getUInt(config_prefix + ".port", 0); - configuration.username = config.getString(config_prefix + ".user", ""); - configuration.password = config.getString(config_prefix + ".password", ""); - configuration.database = config.getString(config_prefix + ".db", ""); + host = config.getString(config_prefix + ".host", ""); + port = config.getUInt(config_prefix + ".port", 0); + username = config.getString(config_prefix + ".user", ""); + password = config.getString(config_prefix + ".password", ""); + database = config.getString(config_prefix + ".db", ""); + method = config.getString(config_prefix + ".method", ""); + collection = config.getString(config_prefix + ".collection"); + options = config.getString(config_prefix + ".options", ""); } if (created_from_ddl) - context->getRemoteHostFilter().checkHostAndPort(configuration.host, toString(configuration.port)); + context->getRemoteHostFilter().checkHostAndPort(host, toString(port)); - return std::make_unique(dict_struct, + return std::make_unique( + dict_struct, config.getString(config_prefix + ".uri", ""), - configuration.host, - configuration.port, - configuration.username, - configuration.password, - config.getString(config_prefix + ".method", ""), - configuration.database, - config.getString(config_prefix + ".collection"), - config.getString(config_prefix + ".options", ""), + host, + port, + username, + password, + method, + database, + collection, + options, sample_block); }; diff --git a/src/Dictionaries/PostgreSQLDictionarySource.cpp b/src/Dictionaries/PostgreSQLDictionarySource.cpp index f62a9a009d8..b1bab17e2e9 100644 --- a/src/Dictionaries/PostgreSQLDictionarySource.cpp +++ b/src/Dictionaries/PostgreSQLDictionarySource.cpp @@ -4,6 +4,7 @@ #include #include #include "DictionarySourceFactory.h" +#include #include "registerDictionaries.h" #if USE_LIBPQXX @@ -13,7 +14,6 @@ #include "readInvalidateQuery.h" #include #include -#include #include #endif @@ -24,16 +24,17 @@ namespace DB namespace ErrorCodes { extern const int SUPPORT_IS_DISABLED; + extern const int BAD_ARGUMENTS; } +static const ValidateKeysMultiset dictionary_allowed_keys = { + "host", "port", "user", "password", "db", "database", "table", "schema", + "update_field", "update_lag", "invalidate_query", "query", "where", "name", "priority"}; + #if USE_LIBPQXX static const UInt64 max_block_size = 8192; -static const std::unordered_set dictionary_allowed_keys = { - "host", "port", "user", "password", "db", "database", "table", "schema", - 
"update_field", "update_lag", "invalidate_query", "query", "where", "name", "priority"}; - namespace { ExternalQueryBuilder makeExternalQueryBuilder(const DictionaryStructure & dict_struct, const String & schema, const String & table, const String & query, const String & where) @@ -177,6 +178,19 @@ std::string PostgreSQLDictionarySource::toString() const return "PostgreSQL: " + configuration.db + '.' + configuration.table + (where.empty() ? "" : ", where: " + where); } +static void validateConfigKeys( + const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix) +{ + Poco::Util::AbstractConfiguration::Keys config_keys; + dict_config.keys(config_prefix, config_keys); + for (const auto & config_key : config_keys) + { + if (dictionary_allowed_keys.contains(config_key) || startsWith(config_key, "replica")) + continue; + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected key `{}` in dictionary source configuration", config_key); + } +} + #endif void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) @@ -191,38 +205,117 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory) { #if USE_LIBPQXX const auto settings_config_prefix = config_prefix + ".postgresql"; - auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key) || key.starts_with("replica"); }; - auto configuration = getExternalDataSourceConfigurationByPriority(config, settings_config_prefix, context, has_config_key); const auto & settings = context->getSettingsRef(); + std::optional dictionary_configuration; + postgres::PoolWithFailover::ReplicasConfigurationByPriority replicas_by_priority; + + auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, context) : nullptr; + if (named_collection) + { + validateNamedCollection>(*named_collection, {}, dictionary_allowed_keys); + + StoragePostgreSQL::Configuration common_configuration; + common_configuration.host = named_collection->getOrDefault("host", ""); + common_configuration.port = named_collection->getOrDefault("port", 0); + common_configuration.username = named_collection->getOrDefault("user", ""); + common_configuration.password = named_collection->getOrDefault("password", ""); + common_configuration.database = named_collection->getAnyOrDefault({"database", "db"}, ""); + common_configuration.schema = named_collection->getOrDefault("schema", ""); + common_configuration.table = named_collection->getOrDefault("table", ""); + + dictionary_configuration.emplace(PostgreSQLDictionarySource::Configuration{ + .db = common_configuration.database, + .schema = common_configuration.schema, + .table = common_configuration.table, + .query = named_collection->getOrDefault("query", ""), + .where = named_collection->getOrDefault("where", ""), + .invalidate_query = named_collection->getOrDefault("invalidate_query", ""), + .update_field = named_collection->getOrDefault("update_field", ""), + .update_lag = named_collection->getOrDefault("update_lag", 1), + }); + + replicas_by_priority[0].emplace_back(common_configuration); + } + else + { + validateConfigKeys(config, settings_config_prefix); + + StoragePostgreSQL::Configuration common_configuration; + common_configuration.host = config.getString(settings_config_prefix + ".host", ""); + common_configuration.port = config.getUInt(settings_config_prefix + ".port", 0); + common_configuration.username = config.getString(settings_config_prefix + ".user", ""); + common_configuration.password = 
config.getString(settings_config_prefix + ".password", ""); + common_configuration.database = config.getString(fmt::format("{}.database", settings_config_prefix), config.getString(fmt::format("{}.db", settings_config_prefix), "")); + common_configuration.schema = config.getString(fmt::format("{}.schema", settings_config_prefix), ""); + common_configuration.table = config.getString(fmt::format("{}.table", settings_config_prefix), ""); + + dictionary_configuration.emplace(PostgreSQLDictionarySource::Configuration + { + .db = common_configuration.database, + .schema = common_configuration.schema, + .table = common_configuration.table, + .query = config.getString(fmt::format("{}.query", settings_config_prefix), ""), + .where = config.getString(fmt::format("{}.where", settings_config_prefix), ""), + .invalidate_query = config.getString(fmt::format("{}.invalidate_query", settings_config_prefix), ""), + .update_field = config.getString(fmt::format("{}.update_field", settings_config_prefix), ""), + .update_lag = config.getUInt64(fmt::format("{}.update_lag", settings_config_prefix), 1) + }); + + + if (config.has(settings_config_prefix + ".replica")) + { + Poco::Util::AbstractConfiguration::Keys config_keys; + config.keys(settings_config_prefix, config_keys); + + for (const auto & config_key : config_keys) + { + if (config_key.starts_with("replica")) + { + String replica_name = settings_config_prefix + "." + config_key; + StoragePostgreSQL::Configuration replica_configuration{common_configuration}; + + size_t priority = config.getInt(replica_name + ".priority", 0); + replica_configuration.host = config.getString(replica_name + ".host", common_configuration.host); + replica_configuration.port = config.getUInt(replica_name + ".port", common_configuration.port); + replica_configuration.username = config.getString(replica_name + ".user", common_configuration.username); + replica_configuration.password = config.getString(replica_name + ".password", common_configuration.password); + + if (replica_configuration.host.empty() || replica_configuration.port == 0 + || replica_configuration.username.empty() || replica_configuration.password.empty()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Named collection of connection parameters is missing some " + "of the parameters and no other dictionary parameters are added"); + } + + replicas_by_priority[priority].emplace_back(replica_configuration); + } + } + } + else + { + replicas_by_priority[0].emplace_back(common_configuration); + } + } if (created_from_ddl) { - for (const auto & replicas : configuration.replicas_configurations) - for (const auto & replica : replicas.second) + for (const auto & [_, replicas] : replicas_by_priority) + for (const auto & replica : replicas) context->getRemoteHostFilter().checkHostAndPort(replica.host, toString(replica.port)); } + auto pool = std::make_shared( - configuration.replicas_configurations, + replicas_by_priority, settings.postgresql_connection_pool_size, settings.postgresql_connection_pool_wait_timeout, settings.postgresql_connection_pool_retries, settings.postgresql_connection_pool_auto_close_connection, settings.postgresql_connection_attempt_timeout); - PostgreSQLDictionarySource::Configuration dictionary_configuration - { - .db = configuration.database, - .schema = configuration.schema, - .table = configuration.table, - .query = config.getString(fmt::format("{}.query", settings_config_prefix), ""), - .where = config.getString(fmt::format("{}.where", settings_config_prefix), ""), - .invalidate_query = 
config.getString(fmt::format("{}.invalidate_query", settings_config_prefix), ""), - .update_field = config.getString(fmt::format("{}.update_field", settings_config_prefix), ""), - .update_lag = config.getUInt64(fmt::format("{}.update_lag", settings_config_prefix), 1) - }; - return std::make_unique(dict_struct, dictionary_configuration, pool, sample_block); + return std::make_unique(dict_struct, dictionary_configuration.value(), pool, sample_block); #else (void)dict_struct; (void)config; diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index b471f3fc58f..56bfa019819 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -645,8 +645,9 @@ void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment) ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromSourceBytes, current_impl_buffer_size); + std::string failure_reason; bool continue_predownload = file_segment.reserve( - current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); + current_predownload_size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason); if (continue_predownload) { LOG_TEST(log, "Left to predownload: {}, buffer size: {}", bytes_to_predownload, current_impl_buffer_size); @@ -1002,7 +1003,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() { chassert(file_offset_of_buffer_end + size - 1 <= file_segment.range().right); - bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds); + std::string failure_reason; + bool success = file_segment.reserve(size, settings.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, failure_reason); if (success) { chassert(file_segment.getCurrentWriteOffset() == static_cast(implementation_buffer->getPosition())); @@ -1028,7 +1030,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() LOG_TRACE(log, "Bypassing cache because writeCache method failed"); } else - LOG_TRACE(log, "No space left in cache to reserve {} bytes, will continue without cache download", size); + LOG_TRACE(log, "No space left in cache to reserve {} bytes, reason: {}, " + "will continue without cache download", size, failure_reason); if (!success) { diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index 382c4a80cc4..103ae0e1832 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -91,7 +91,8 @@ bool FileSegmentRangeWriter::write(char * data, size_t size, size_t offset, File size_t size_to_write = std::min(available_size, size); - bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds); + std::string failure_reason; + bool reserved = file_segment->reserve(size_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason); if (!reserved) { appendFilesystemCacheLog(*file_segment); diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index bb9761a3905..c96f5f0c931 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -80,20 +80,27 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c if (with_file_cache) { - auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path); - buf = std::make_unique( - 
object_path, - cache_key, - settings.remote_fs_cache, - FileCache::getCommonUser(), - [=, this]() { return read_buffer_creator(/* restricted_seek */true, object); }, - settings, - query_id, - object.bytes_size, - /* allow_seeks */false, - /* use_external_buffer */true, - /* read_until_position */std::nullopt, - cache_log); + if (settings.remote_fs_cache->isInitialized()) + { + auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path); + buf = std::make_unique( + object_path, + cache_key, + settings.remote_fs_cache, + FileCache::getCommonUser(), + [=, this]() { return read_buffer_creator(/* restricted_seek */true, object); }, + settings, + query_id, + object.bytes_size, + /* allow_seeks */false, + /* use_external_buffer */true, + /* read_until_position */std::nullopt, + cache_log); + } + else + { + settings.remote_fs_cache->throwInitExceptionIfNeeded(); + } } /// Can't wrap CachedOnDiskReadBufferFromFile in CachedInMemoryReadBufferFromFile because the diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index fb817005399..ab0d357119c 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -99,7 +99,7 @@ std::unique_ptr CachedObjectStorage::writeObject( /// N /// Need to remove even if cache_on_write == false. removeCacheIfExists(object.remote_path); - if (cache_on_write) + if (cache_on_write && cache->isInitialized()) { auto key = getCacheKey(object.remote_path); return std::make_unique( @@ -122,7 +122,8 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c return; /// Add try catch? - cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); + if (cache->isInitialized()) + cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } void CachedObjectStorage::removeObject(const StoredObject & object) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 7205b5b3294..8de80971238 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -63,7 +63,7 @@ void throwIfError(const Aws::Utils::Outcome & response) { const auto & err = response.GetError(); throw S3Exception( - fmt::format("{} (Code: {}, s3 exception: {})", + fmt::format("{} (Code: {}, S3 exception: '{}')", err.GetMessage(), static_cast(err.GetErrorType()), err.GetExceptionName()), err.GetErrorType()); } diff --git a/src/Formats/EscapingRuleUtils.cpp b/src/Formats/EscapingRuleUtils.cpp index 58407a810c5..5429d8b7e0d 100644 --- a/src/Formats/EscapingRuleUtils.cpp +++ b/src/Formats/EscapingRuleUtils.cpp @@ -419,10 +419,11 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo String result = getAdditionalFormatInfoForAllRowBasedFormats(settings); /// First, settings that are common for all text formats: result += fmt::format( - ", try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}", + ", try_infer_integers={}, try_infer_dates={}, try_infer_datetimes={}, try_infer_datetimes_only_datetime64={}", settings.try_infer_integers, settings.try_infer_dates, - settings.try_infer_datetimes); + settings.try_infer_datetimes, + settings.try_infer_datetimes_only_datetime64); /// Second, format-specific settings: switch (escaping_rule) @@ -463,7 +464,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & 
settings, Fo settings.json.read_arrays_as_strings, settings.json.try_infer_objects_as_tuples, settings.json.infer_incomplete_types_as_strings, - settings.json.allow_object_type, + settings.json.allow_deprecated_object_type, settings.json.use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects); break; default: diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index a78836ff63c..f1214aac7dc 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -146,11 +146,13 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.json.validate_types_from_metadata = settings.input_format_json_validate_types_from_metadata; format_settings.json.validate_utf8 = settings.output_format_json_validate_utf8; format_settings.json_object_each_row.column_for_object_name = settings.format_json_object_each_row_column_for_object_name; - format_settings.json.allow_object_type = context->getSettingsRef().allow_experimental_object_type; + format_settings.json.allow_deprecated_object_type = context->getSettingsRef().allow_experimental_object_type; + format_settings.json.allow_json_type = context->getSettingsRef().allow_experimental_json_type; format_settings.json.compact_allow_variable_number_of_columns = settings.input_format_json_compact_allow_variable_number_of_columns; format_settings.json.try_infer_objects_as_tuples = settings.input_format_json_try_infer_named_tuples_from_objects; format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence; format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields; + format_settings.json.type_json_skip_duplicated_paths = settings.type_json_skip_duplicated_paths; format_settings.null_as_default = settings.input_format_null_as_default; format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields; format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros; @@ -266,6 +268,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.try_infer_integers = settings.input_format_try_infer_integers; format_settings.try_infer_dates = settings.input_format_try_infer_dates; format_settings.try_infer_datetimes = settings.input_format_try_infer_datetimes; + format_settings.try_infer_datetimes_only_datetime64 = settings.input_format_try_infer_datetimes_only_datetime64; format_settings.try_infer_exponent_floats = settings.input_format_try_infer_exponent_floats; format_settings.markdown.escape_special_characters = settings.output_format_markdown_escape_special_characters; format_settings.bson.output_string_as_string = settings.output_format_bson_string_as_string; @@ -280,6 +283,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se format_settings.max_parser_depth = context->getSettingsRef().max_parser_depth; format_settings.client_protocol_version = context->getClientProtocolVersion(); format_settings.date_time_overflow_behavior = settings.date_time_overflow_behavior; + format_settings.try_infer_variant = settings.input_format_try_infer_variants; /// Validate avro_schema_registry_url with RemoteHostFilter when non-empty and in Server context if (format_settings.schema.is_server) diff --git a/src/Formats/FormatSettings.h b/src/Formats/FormatSettings.h index f0359218775..ed178a68b9d 100644 --- a/src/Formats/FormatSettings.h +++ b/src/Formats/FormatSettings.h @@ 
-35,6 +35,7 @@ struct FormatSettings bool decimal_trailing_zeros = false; bool defaults_for_omitted_fields = true; bool is_writing_to_terminal = false; + bool try_infer_variant = false; bool seekable_read = true; UInt64 max_rows_to_read_for_schema_inference = 25000; @@ -46,6 +47,7 @@ struct FormatSettings bool try_infer_integers = true; bool try_infer_dates = true; bool try_infer_datetimes = true; + bool try_infer_datetimes_only_datetime64 = false; bool try_infer_exponent_floats = false; enum class DateTimeInputFormat : uint8_t @@ -227,13 +229,15 @@ struct FormatSettings bool try_infer_numbers_from_strings = false; bool validate_types_from_metadata = true; bool validate_utf8 = false; - bool allow_object_type = false; + bool allow_deprecated_object_type = false; + bool allow_json_type = false; bool valid_output_on_exception = false; bool compact_allow_variable_number_of_columns = false; bool try_infer_objects_as_tuples = false; bool infer_incomplete_types_as_strings = true; bool throw_on_bad_escape_sequence = true; bool ignore_unnecessary_fields = true; + bool type_json_skip_duplicated_paths = false; } json{}; struct diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 86fde3852b8..122224535a7 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -8,7 +8,6 @@ #if USE_RAPIDJSON #include #endif - #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #include #include @@ -38,8 +38,10 @@ #include #include #include +#include #include #include +#include #include @@ -53,6 +55,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; + extern const int INCORRECT_DATA; } template @@ -123,7 +126,7 @@ void jsonElementToString(const typename JSONParser::Element & element, WriteBuff template bool tryGetNumericValueFromJSONElement( - NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error) + NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error) { switch (element.type()) { @@ -135,7 +138,7 @@ bool tryGetNumericValueFromJSONElement( /// But it will be more convenient for user to perform conversion. 
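The extra allow_type_conversion parameter threaded through tryGetNumericValueFromJSONElement below makes the numeric readers strict: with conversions disabled, a JSON value is accepted only when it already has the requested kind. A simplified standalone illustration of that guard, with assumed names and only the standard library:

#include <charconv>
#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

using Element = std::variant<int64_t, double, bool, std::string>;

bool tryGetInt64(const Element & element, bool allow_type_conversion, int64_t & value)
{
    if (const auto * i = std::get_if<int64_t>(&element))
    {
        value = *i;
        return true;
    }
    if (!allow_type_conversion)
        return false; // exact type required, e.g. when probing existing Dynamic variants
    if (const auto * d = std::get_if<double>(&element))
    {
        value = static_cast<int64_t>(*d);
        return *d == static_cast<double>(value); // reject lossy double -> integer conversions
    }
    if (const auto * b = std::get_if<bool>(&element))
    {
        value = *b ? 1 : 0;
        return true;
    }
    const auto & s = std::get<std::string>(element);
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
    return ec == std::errc() && ptr == s.data() + s.size();
}

int main()
{
    int64_t v = 0;
    std::cout << tryGetInt64(std::string{"42"}, /*allow_type_conversion=*/false, v)  // 0
              << tryGetInt64(std::string{"42"}, /*allow_type_conversion=*/true, v)   // 1
              << '\n';
}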
value = static_cast(element.getDouble()); } - else if (!accurate::convertNumeric(element.getDouble(), value)) + else if (!allow_type_conversion || !accurate::convertNumeric(element.getDouble(), value)) { error = fmt::format("cannot convert double value {} to {}", element.getDouble(), TypeName); return false; @@ -158,7 +161,7 @@ bool tryGetNumericValueFromJSONElement( case ElementType::BOOL: if constexpr (is_integer) { - if (convert_bool_to_integer) + if (convert_bool_to_integer && allow_type_conversion) { value = static_cast(element.getBool()); break; @@ -166,13 +169,17 @@ bool tryGetNumericValueFromJSONElement( } error = fmt::format("cannot convert bool value to {}", TypeName); return false; - case ElementType::STRING: { + case ElementType::STRING: + { + if (!allow_type_conversion) + return false; + auto rb = ReadBufferFromMemory{element.getString()}; if constexpr (std::is_floating_point_v) { if (!tryReadFloatText(value, rb) || !rb.eof()) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } } @@ -186,13 +193,13 @@ bool tryGetNumericValueFromJSONElement( rb.position() = rb.buffer().begin(); if (!tryReadFloatText(tmp_float, rb) || !rb.eof()) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } if (!accurate::convertNumeric(tmp_float, value)) { - error = fmt::format("cannot parse {} value here: {}", TypeName, element.getString()); + error = fmt::format("cannot parse {} value here: \"{}\"", TypeName, element.getString()); return false; } } @@ -241,8 +248,16 @@ public: return false; } + if (is_bool_type && !insert_settings.allow_type_conversion) + { + if (!element.isBool()) + return false; + assert_cast &>(column).insertValue(element.getBool()); + return true; + } + NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || is_bool_type, error)) + if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || is_bool_type, insert_settings.allow_type_conversion, error)) { if (error.empty()) error = fmt::format("cannot read {} value from JSON element: {}", TypeName, jsonElementToString(element, format_settings)); @@ -289,8 +304,17 @@ public: return false; } + if (this->is_bool_type && !insert_settings.allow_type_conversion) + { + if (!element.isBool()) + return false; + UInt8 value = element.getBool(); + assert_cast(column).insertData(reinterpret_cast(&value), sizeof(value)); + return true; + } + NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, error)) + if (!tryGetNumericValueFromJSONElement(value, element, insert_settings.convert_bool_to_integer || this->is_bool_type, insert_settings.allow_type_conversion, error)) { if (error.empty()) error = fmt::format("cannot read {} value from JSON element: {}", TypeName, jsonElementToString(element, format_settings)); @@ -316,7 +340,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -333,6 +357,9 @@ public: if (!element.isString()) { + if 
(!insert_settings.allow_type_conversion) + return false; + auto & col_str = assert_cast(column); auto & chars = col_str.getChars(); WriteBufferFromVector buf(chars, AppendModeTag()); @@ -360,7 +387,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -378,6 +405,9 @@ public: if (!element.isString()) { + if (!insert_settings.allow_type_conversion) + return false; + auto value = jsonElementToString(element, format_settings); assert_cast(column).insertData(value.data(), value.size()); } @@ -402,7 +432,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -419,7 +449,11 @@ public: } if (!element.isString()) + { + if (!insert_settings.allow_type_conversion) + return false; return checkValueSizeAndInsert(column, jsonElementToString(element, format_settings), error); + } return checkValueSizeAndInsert(column, element.getString(), error); } @@ -450,7 +484,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -466,7 +500,11 @@ public: } if (!element.isString()) + { + if (!insert_settings.allow_type_conversion) + return false; return checkValueSizeAndInsert(column, jsonElementToString(element, format_settings), error); + } return checkValueSizeAndInsert(column, element.getString(), error); } @@ -630,7 +668,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -649,7 +687,7 @@ public: return false; } } - else if (element.isUInt64()) + else if (element.isUInt64() && insert_settings.allow_type_conversion) { value = element.getUInt64(); } @@ -712,7 +750,8 @@ public: case ElementType::INT64: value = convertToDecimal, DataTypeDecimal>(element.getInt64(), scale); break; - case ElementType::STRING: { + case ElementType::STRING: + { auto rb = ReadBufferFromMemory{element.getString()}; if (!SerializationDecimal::tryReadText(value, rb, DecimalUtils::max_precision, scale)) { @@ -721,7 +760,8 @@ public: } break; } - case ElementType::NULL_VALUE: { + case ElementType::NULL_VALUE: + { if (!format_settings.null_as_default) { error = "cannot convert null to Decimal value"; @@ -756,7 +796,7 @@ public: bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, - const JSONExtractInsertSettings &, + const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override { @@ -777,6 +817,9 @@ public: } else { + if (!insert_settings.allow_type_conversion) + return false; + switch (element.type()) { case ElementType::DOUBLE: @@ -1104,7 +1147,7 @@ public: } } - if (!were_valid_elements) + if (data.size() != old_size && !were_valid_elements) { data.popBack(data.size() - old_size); return false; @@ -1174,7 +1217,7 @@ public: else { set_size(old_size); - error += fmt::format("(during reading tuple 
{} element)", index); + error += fmt::format(" (during reading tuple {} element)", index); return false; } } @@ -1202,7 +1245,7 @@ public: else { set_size(old_size); - error += fmt::format("(during reading tuple {} element)", index); + error += fmt::format(" (during reading tuple {} element)", index); return false; } } @@ -1221,7 +1264,7 @@ public: else if (!insert_settings.insert_default_on_invalid_elements_in_complex_types) { set_size(old_size); - error += fmt::format("(during reading tuple element \"{}\")", key); + error += fmt::format(" (during reading tuple element \"{}\")", key); return false; } } @@ -1288,7 +1331,7 @@ public: { key_col.popBack(key_col.size() - offsets.back()); value_col.popBack(value_col.size() - offsets.back()); - error += fmt::format("(during reading value of key \"{}\")", pair.first); + error += fmt::format(" (during reading value of key \"{}\")", pair.first); return false; } } @@ -1346,6 +1389,13 @@ template class DynamicNode : public JSONExtractTreeNode { public: + explicit DynamicNode( + size_t max_dynamic_paths_for_object_ = DataTypeObject::DEFAULT_MAX_SEPARATELY_STORED_PATHS, + size_t max_dynamic_types_for_object_ = DataTypeDynamic::DEFAULT_MAX_DYNAMIC_TYPES) + : max_dynamic_paths_for_object(max_dynamic_paths_for_object_), max_dynamic_types_for_object(max_dynamic_types_for_object_) + { + } + bool insertResultToColumn( IColumn & column, const typename JSONParser::Element & element, @@ -1354,7 +1404,7 @@ public: String & error) const override { auto & column_dynamic = assert_cast(column); - /// First, check if element is NULL. + /// Check if element is NULL. if (element.isNull()) { column_dynamic.insertDefault(); @@ -1363,15 +1413,52 @@ public: auto & variant_column = column_dynamic.getVariantColumn(); const auto & variant_info = column_dynamic.getVariantInfo(); - /// Second, infer ClickHouse type for this element and add it as a new variant. - auto element_type = elementToDataType(element, format_settings); + const auto & variant_types = assert_cast(*variant_info.variant_type).getVariants(); + + /// Try to insert element into current variants but with no types conversion. + /// We want to avoid inferring the type on each row, so if we can insert this element into + /// any existing variant with no types conversion (like Integer -> String, Double -> Integer, etc) + /// we will do it and won't try to infer the type. + auto shared_variant_discr = column_dynamic.getSharedVariantDiscriminator(); + auto insert_settings_with_no_type_conversion = insert_settings; + insert_settings_with_no_type_conversion.allow_type_conversion = false; + for (size_t i = 0; i != variant_info.variant_names.size(); ++i) + { + if (i != shared_variant_discr) + { + auto it = json_extract_nodes_cache.find(variant_info.variant_names[i]); + if (it == json_extract_nodes_cache.end()) + it = json_extract_nodes_cache.emplace(variant_info.variant_names[i], buildJSONExtractTree(variant_types[i], "Dynamic inference")).first; + + if (it->second->insertResultToColumn(variant_column.getVariantByGlobalDiscriminator(i), element, insert_settings_with_no_type_conversion, format_settings, error)) + { + variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(i)); + variant_column.getOffsets().push_back(variant_column.getVariantByGlobalDiscriminator(i).size() - 1); + return true; + } + } + } + + /// We couldn't insert element into current variants, infer ClickHouse type for this element and add it as a new variant. 
+ auto element_type = removeNullable(elementToDataType(element, format_settings)); + if (!checkIfTypeIsComplete(element_type)) + { + throw Exception( + ErrorCodes::INCORRECT_DATA, + "Cannot infer the type of JSON element {}, because it contains only nulls. To use String type for elements with incomplete " + "type, enable setting input_format_json_infer_incomplete_types_as_strings", + jsonElementToString(element, format_settings)); + } + auto element_type_name = element_type->getName(); if (column_dynamic.addNewVariant(element_type, element_type_name)) { - auto node = buildJSONExtractTree(element_type, "Dynamic inference"); + auto it = json_extract_nodes_cache.find(element_type_name); + if (it == json_extract_nodes_cache.end()) + it = json_extract_nodes_cache.emplace(element_type_name, buildJSONExtractTree(element_type, "Dynamic inference")).first; auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name); auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator); - if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error)) + if (!it->second->insertResultToColumn(variant, element, insert_settings, format_settings, error)) return false; variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(global_discriminator)); variant_column.getOffsets().push_back(variant.size() - 1); @@ -1383,25 +1470,28 @@ public: auto node = buildJSONExtractTree(element_type, "Dynamic inference"); if (!node->insertResultToColumn(*tmp_variant_column, element, insert_settings, format_settings, error)) return false; + column_dynamic.insertValueIntoSharedVariant(*tmp_variant_column, element_type, element_type_name, 0); return true; } - static DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) + DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) const { JSONInferenceInfo json_inference_info; auto type = elementToDataTypeImpl(element, format_settings, json_inference_info); transformFinalInferredJSONTypeIfNeeded(type, format_settings, &json_inference_info); + if (format_settings.schema_inference_make_columns_nullable && type->haveSubtypes()) + type = makeNullableRecursively(type); return type; } private: - static DataTypePtr elementToDataTypeImpl(const typename JSONParser::Element & element, const FormatSettings & format_settings, JSONInferenceInfo & json_inference_info) + DataTypePtr elementToDataTypeImpl(const typename JSONParser::Element & element, const FormatSettings & format_settings, JSONInferenceInfo & json_inference_info) const { switch (element.type()) { case ElementType::NULL_VALUE: - return makeNullable(std::make_shared()); + return std::make_shared(std::make_shared()); case ElementType::BOOL: return DataTypeFactory::instance().get("Bool"); case ElementType::INT64: @@ -1439,10 +1529,10 @@ private: DataTypes types; types.reserve(array.size()); for (auto value : array) - types.push_back(makeNullableSafe(elementToDataTypeImpl(value, format_settings, json_inference_info))); + types.push_back(elementToDataTypeImpl(value, format_settings, json_inference_info)); if (types.empty()) - return std::make_shared(makeNullable(std::make_shared())); + return std::make_shared(std::make_shared()); if (checkIfTypesAreEqual(types)) return std::make_shared(types.back()); @@ -1469,12 +1559,238 @@ private: return std::make_shared(types); } - case ElementType::OBJECT: { - /// 
TODO: Use new JSON type here when it's ready. - return std::make_shared(std::make_shared(), makeNullable(std::make_shared())); + case ElementType::OBJECT: + { + return std::make_shared(DataTypeObject::SchemaFormat::JSON, max_dynamic_paths_for_object, max_dynamic_types_for_object); } } } + + size_t max_dynamic_paths_for_object; + size_t max_dynamic_types_for_object; + + /// Avoid building JSONExtractTreeNode for the same data types on each row by using cache. + mutable std::unordered_map>> json_extract_nodes_cache; +}; + +template +class ObjectJSONNode : public JSONExtractTreeNode +{ +public: + ObjectJSONNode( + std::unordered_map>> typed_path_nodes_, + const std::unordered_set & paths_to_skip_, + const std::vector & path_regexps_to_skip_, + size_t max_dynamic_paths_, + size_t max_dynamic_types_) + : typed_path_nodes(std::move(typed_path_nodes_)) + , paths_to_skip(paths_to_skip_) + , dynamic_node(std::make_unique>( + max_dynamic_paths_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_PATHS_REDUCE_FACTOR, + max_dynamic_types_ / DataTypeObject::NESTED_OBJECT_MAX_DYNAMIC_TYPES_REDUCE_FACTOR)) + , dynamic_serialization(std::make_shared()) + { + sorted_paths_to_skip.assign(paths_to_skip.begin(), paths_to_skip.end()); + std::sort(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end()); + for (const auto & regexp : path_regexps_to_skip_) + path_regexps_to_skip.emplace_back(regexp); + } + + bool insertResultToColumn(IColumn & column, const typename JSONParser::Element & element, const JSONExtractInsertSettings & insert_settings, const FormatSettings & format_settings, String & error) const override + { + if (element.isNull() && format_settings.null_as_default) + { + column.insertDefault(); + return true; + } + + if (!element.isObject()) + { + error = fmt::format("Cannot read JSON object from JSON element: {}", jsonElementToString(element, format_settings)); + return false; + } + + auto & column_object = assert_cast(column); + size_t prev_size = column_object.size(); + + /// Paths in shared data should be sorted, so we cannot insert paths there during traverse. + /// Instead we collect all paths and values that should go to shared data, sort them and insert later. + /// It's not optimal, but it's a price we pay for faster reading of subcolumns. + std::vector> paths_and_values_for_shared_data; + if (!traverseAndInsert(column_object, element, "", insert_settings, format_settings, paths_and_values_for_shared_data, prev_size, error)) + { + /// If there was an error, restore previous state. + SerializationObject::restoreColumnObject(column_object, prev_size); + return false; + } + + /// Fill shared data. + auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + std::sort(paths_and_values_for_shared_data.begin(), paths_and_values_for_shared_data.end()); + for (size_t i = 0; i != paths_and_values_for_shared_data.size(); ++i) + { + const auto & [path, value] = paths_and_values_for_shared_data[i]; + /// Check if we duplicated paths. + if (i != 0 && path == paths_and_values_for_shared_data[i - 1].first) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}. 
You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", path); + SerializationObject::restoreColumnObject(column_object, prev_size); + return false; + } + } + else + { + shared_data_paths->insertData(path.data(), path.size()); + shared_data_values->insertData(value.data(), value.size()); + } + } + column_object.getSharedDataOffsets().push_back(shared_data_paths->size()); + + /// Fill remaining typed and dynamic paths. + for (auto & [_, typed_column] : column_object.getTypedPaths()) + { + if (typed_column->size() == prev_size) + typed_column->insertDefault(); + } + + for (auto & [_, dynamic_column] : column_object.getDynamicPathsPtrs()) + { + if (dynamic_column->size() == prev_size) + dynamic_column->insertDefault(); + } + + return true; + } + +private: + bool traverseAndInsert( + ColumnObject & column_object, + const typename JSONParser::Element & element, + const String & current_path, + const JSONExtractInsertSettings & insert_settings, + const FormatSettings & format_settings, + std::vector> & paths_and_values_for_shared_data, + size_t current_size, + String & error) const + { + if (shouldSkipPath(current_path)) + return true; + + if (element.isObject() && !typed_path_nodes.contains(current_path)) + { + for (auto [key, value] : element.getObject()) + { + String path = current_path; + if (!path.empty()) + path.append("."); + path += key; + if (!traverseAndInsert(column_object, value, path, insert_settings, format_settings, paths_and_values_for_shared_data, current_size, error)) + return false; + } + + return true; + } + + auto & typed_paths = column_object.getTypedPaths(); + auto & dynamic_paths_ptrs = column_object.getDynamicPathsPtrs(); + /// Check if we have this path in typed paths. + if (auto typed_it = typed_paths.find(current_path); typed_it != typed_paths.end()) + { + /// Check if we already had this path. + if (typed_it->second->size() > current_size) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", current_path); + return false; + } + } + else if (!typed_path_nodes.at(current_path)->insertResultToColumn(*typed_it->second, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Check if we have this path in dynamic paths. + else if (auto dynamic_it = dynamic_paths_ptrs.find(current_path); dynamic_it != dynamic_paths_ptrs.end()) + { + /// Check if we already had this path. + if (dynamic_it->second->size() > current_size) + { + if (!format_settings.json.type_json_skip_duplicated_paths) + { + error = fmt::format("Duplicate path found during parsing JSON object: {}. You can enable setting type_json_skip_duplicated_paths to skip duplicated paths during insert", current_path); + return false; + } + } + else if (!dynamic_node->insertResultToColumn(*dynamic_it->second, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Don't create new dynamic paths for null and don't insert null values into shared data. + /// We consider null equivalent to the absence of this path. + else if (element.isNull()) + { + } + /// Try to add a new dynamic path. 
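traverseAndInsert dispatches each leaf value in a fixed order: typed path, existing dynamic path, a newly opened dynamic path if the budget allows, and only then the sorted shared-data section. A condensed standalone sketch of that order, with hypothetical containers in place of the real ColumnObject:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct ObjectColumnSketch
{
    std::map<std::string, std::vector<std::string>> typed_paths;   // fixed schema paths
    std::map<std::string, std::vector<std::string>> dynamic_paths; // discovered paths
    size_t max_dynamic_paths = 2;
    std::vector<std::pair<std::string, std::string>> shared_data;  // overflow, sorted per row

    void insertLeaf(const std::string & path, const std::string & value)
    {
        if (auto it = typed_paths.find(path); it != typed_paths.end())
            it->second.push_back(value);
        else if (auto it2 = dynamic_paths.find(path); it2 != dynamic_paths.end())
            it2->second.push_back(value);
        else if (dynamic_paths.size() < max_dynamic_paths)
            dynamic_paths[path].push_back(value); // open a new dynamic path
        else
            shared_data.emplace_back(path, value); // overflow into shared data
    }
};

int main()
{
    ObjectColumnSketch col;
    col.typed_paths["id"]; // declared typed path
    col.insertLeaf("id", "1");
    col.insertLeaf("a", "x");
    col.insertLeaf("b", "y");
    col.insertLeaf("c", "z"); // exceeds max_dynamic_paths, goes to shared data
    std::cout << col.shared_data.size() << '\n'; // 1
}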
+ else if (auto * dynamic_column = column_object.tryToAddNewDynamicPath(current_path)) + { + if (!dynamic_node->insertResultToColumn(*dynamic_column, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + } + /// Otherwise this path should go to the shared data. + else + { + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + if (!dynamic_node->insertResultToColumn(*tmp_dynamic_column, element, insert_settings, format_settings, error)) + { + error += fmt::format(" (while reading path {})", current_path); + return false; + } + + paths_and_values_for_shared_data.emplace_back(current_path, ""); + WriteBufferFromString buf(paths_and_values_for_shared_data.back().second); + dynamic_serialization->serializeBinary(*tmp_dynamic_column, 0, buf, format_settings); + } + + return true; + } + + bool shouldSkipPath(const String & path) const + { + if (paths_to_skip.contains(path)) + return true; + + if (!sorted_paths_to_skip.empty()) + { + auto it = std::lower_bound(sorted_paths_to_skip.begin(), sorted_paths_to_skip.end(), path); + if (it != sorted_paths_to_skip.begin() && path.starts_with(*std::prev(it))) + return true; + } + + for (const auto & regexp : path_regexps_to_skip) + { + if (re2::RE2::FullMatch(path, regexp)) + return true; + } + + return false; + } + + std::unordered_map>> typed_path_nodes; + std::unordered_set paths_to_skip; + std::vector sorted_paths_to_skip; + std::list path_regexps_to_skip; + std::unique_ptr> dynamic_node; + std::shared_ptr dynamic_serialization; }; } @@ -1621,6 +1937,26 @@ std::unique_ptr> buildJSONExtractTree(const Data } case TypeIndex::Dynamic: return std::make_unique>(); + case TypeIndex::Object: + { + const auto & object_type = assert_cast(*type); + const auto & typed_paths = object_type.getTypedPaths(); + std::unordered_map>> typed_path_nodes; + typed_path_nodes.reserve(typed_paths.size()); + for (const auto & [path, path_type] : typed_paths) + typed_path_nodes[path] = buildJSONExtractTree(path_type, source_for_exception_message); + + switch (object_type.getSchemaFormat()) + { + case DataTypeObject::SchemaFormat::JSON: + return std::make_unique>( + std::move(typed_path_nodes), + object_type.getPathsToSkip(), + object_type.getPathRegexpsToSkip(), + object_type.getMaxDynamicPaths(), + object_type.getMaxDynamicTypes()); + } + } default: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, @@ -1638,7 +1974,7 @@ template std::unique_ptr> buildJSONExtractTr #if USE_RAPIDJSON template void jsonElementToString(const RapidJSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template std::unique_ptr> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message); -template bool tryGetNumericValueFromJSONElement(Float64 & value, const RapidJSONParser::Element & element, bool convert_bool_to_integer, String & error); +template bool tryGetNumericValueFromJSONElement(Float64 & value, const RapidJSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error); #else template void jsonElementToString(const DummyJSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template std::unique_ptr> buildJSONExtractTree(const DataTypePtr & type, const char * source_for_exception_message); diff --git a/src/Formats/JSONExtractTree.h b/src/Formats/JSONExtractTree.h index b5e82506548..89f2d191dfb 100644 --- 
a/src/Formats/JSONExtractTree.h +++ b/src/Formats/JSONExtractTree.h @@ -17,6 +17,9 @@ struct JSONExtractInsertSettings /// For example, if we have [1, "hello", 2] and type Array(UInt32), /// we will insert [1, 0, 2] in the column. Used in all JSONExtract functions. bool insert_default_on_invalid_elements_in_complex_types = false; + /// If false, JSON value will be inserted into column only if type of the value is + /// the same as column type (no conversions like Integer -> String, Integer -> Float, etc). + bool allow_type_conversion = true; }; template @@ -36,6 +39,6 @@ template void jsonElementToString(const typename JSONParser::Element & element, WriteBuffer & buf, const FormatSettings & format_settings); template -bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, String & error); +bool tryGetNumericValueFromJSONElement(NumberType & value, const typename JSONParser::Element & element, bool convert_bool_to_integer, bool allow_type_conversion, String & error); } diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 017befe5b0e..9d898cd2470 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index 3c374ada9e6..e8eab3b4453 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -7,11 +7,12 @@ #include #include #include +#include #include #include #include #include -#include +#include #include #include #include @@ -306,37 +307,72 @@ namespace type_indexes.erase(TypeIndex::UInt64); } - /// If we have only Date and DateTime types, convert Date to DateTime, - /// otherwise, convert all Date and DateTime to String. + /// if setting 'try_infer_variant' is true then we convert to type variant. + void transformVariant(DataTypes & data_types, TypeIndexesSet & type_indexes) + { + if (checkIfTypesAreEqual(data_types)) + return; + + DataTypes variant_types; + for (const auto & type : data_types) + { + if (const auto * variant_type = typeid_cast(type.get())) + { + const auto & current_variants = variant_type->getVariants(); + variant_types.insert(variant_types.end(), current_variants.begin(), current_variants.end()); + } + else + { + variant_types.push_back(type); + } + } + + auto variant_type = std::make_shared(variant_types); + + for (auto & type : data_types) + type = variant_type; + type_indexes = {TypeIndex::Variant}; + } + + /// If we have only date/datetimes types (Date/DateTime/DateTime64), convert all of them to the common type, + /// otherwise, convert all Date, DateTime and DateTime64 to String. 
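+ /// A sketch of the promotion rules implemented below:
+ ///   {Date, DateTime}       -> DateTime
+ ///   {Date, DateTime64}     -> DateTime64(9)
+ ///   {DateTime, DateTime64} -> DateTime64(9)
+ ///   {Date, String}         -> String (not all types are dates/datetimes)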
void transformDatesAndDateTimes(DataTypes & data_types, TypeIndexesSet & type_indexes) { bool have_dates = type_indexes.contains(TypeIndex::Date); - bool have_datetimes = type_indexes.contains(TypeIndex::DateTime64); - bool all_dates_or_datetimes = (type_indexes.size() == (static_cast(have_dates) + static_cast(have_datetimes))); + bool have_datetimes = type_indexes.contains(TypeIndex::DateTime); + bool have_datetimes64 = type_indexes.contains(TypeIndex::DateTime64); + bool all_dates_or_datetimes = (type_indexes.size() == (static_cast(have_dates) + static_cast(have_datetimes) + static_cast(have_datetimes64))); - if (!all_dates_or_datetimes && (have_dates || have_datetimes)) + if (!all_dates_or_datetimes && (have_dates || have_datetimes || have_datetimes64)) { for (auto & type : data_types) { - if (isDate(type) || isDateTime64(type)) + if (isDate(type) || isDateTime(type) || isDateTime64(type)) type = std::make_shared(); } type_indexes.erase(TypeIndex::Date); type_indexes.erase(TypeIndex::DateTime); + type_indexes.erase(TypeIndex::DateTime64); type_indexes.insert(TypeIndex::String); return; } - if (have_dates && have_datetimes) + for (auto & type : data_types) { - for (auto & type : data_types) + if (isDate(type) && (have_datetimes || have_datetimes64)) { - if (isDate(type)) + if (have_datetimes64) type = std::make_shared(9); + else + type = std::make_shared(); + type_indexes.erase(TypeIndex::Date); + } + else if (isDateTime(type) && have_datetimes64) + { + type = std::make_shared(9); + type_indexes.erase(TypeIndex::DateTime); } - - type_indexes.erase(TypeIndex::Date); } } @@ -644,7 +680,11 @@ namespace transformDatesAndDateTimes(data_types, type_indexes); if constexpr (!is_json) + { + if (settings.try_infer_variant) + transformVariant(data_types, type_indexes); return; + } /// Check settings specific for JSON formats. @@ -662,6 +702,10 @@ namespace if (settings.json.try_infer_objects_as_tuples) mergeJSONPaths(data_types, type_indexes, settings, json_info); + + if (settings.try_infer_variant) + transformVariant(data_types, type_indexes); + }; auto transform_complex_types = [&](DataTypes & data_types, TypeIndexesSet & type_indexes) @@ -674,7 +718,11 @@ namespace transformNothingComplexTypes(data_types, type_indexes); if constexpr (!is_json) + { + if (settings.try_infer_variant) + transformVariant(data_types, type_indexes); return; + } /// Convert JSON tuples with same nested types to arrays. transformTuplesWithEqualNestedTypesToArrays(data_types, type_indexes); @@ -687,6 +735,9 @@ namespace if (json_info && json_info->allow_merging_named_tuples) mergeNamedTuples(data_types, type_indexes, settings, json_info); + + if (settings.try_infer_variant) + transformVariant(data_types, type_indexes); }; transformTypesRecursively(types, transform_simple_types, transform_complex_types); @@ -697,55 +748,87 @@ namespace bool tryInferDate(std::string_view field) { - if (field.empty()) + /// Minimum length of Date text representation is 8 (YYYY-M-D) and maximum is 10 (YYYY-MM-DD) + if (field.size() < 8 || field.size() > 10) return false; - ReadBufferFromString buf(field); - Float64 tmp_float; /// Check if it's just a number, and if so, don't try to infer Date from it, /// because we can interpret this number as a Date (for example 20000101 will be 2000-01-01) /// and it will lead to inferring Date instead of simple Int64/UInt64 in some cases. 
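+ /// An all-digit field is therefore rejected without parsing; when a date is actually parsed, only '-', '/' and ':' are accepted as delimiters.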
- if (tryReadFloatText(tmp_float, buf) && buf.eof()) - return false; - - buf.seek(0, SEEK_SET); /// Return position to the beginning - - DayNum tmp; - return tryReadDateText(tmp, buf) && buf.eof(); - } - - bool tryInferDateTime(std::string_view field, const FormatSettings & settings) - { - if (field.empty()) + if (std::all_of(field.begin(), field.end(), isNumericASCII)) return false; ReadBufferFromString buf(field); - Float64 tmp_float; + DayNum tmp; + return tryReadDateText(tmp, buf, DateLUT::instance(), /*allowed_delimiters=*/"-/:") && buf.eof(); + } + + DataTypePtr tryInferDateTimeOrDateTime64(std::string_view field, const FormatSettings & settings) + { + /// Don't try to infer DateTime if string is too long. + /// It's difficult to say what is the real maximum length of + /// DateTime we can parse using BestEffort approach. + /// 50 symbols is more or less valid limit for date times that makes sense. + if (field.empty() || field.size() > 50) + return nullptr; + + /// Check that we have at least one digit, don't infer datetime form strings like "Apr"/"May"/etc. + if (!std::any_of(field.begin(), field.end(), isNumericASCII)) + return nullptr; + /// Check if it's just a number, and if so, don't try to infer DateTime from it, /// because we can interpret this number as a timestamp and it will lead to - /// inferring DateTime instead of simple Int64/Float64 in some cases. + /// inferring DateTime instead of simple Int64 in some cases. + if (std::all_of(field.begin(), field.end(), isNumericASCII)) + return nullptr; + + ReadBufferFromString buf(field); + Float64 tmp_float; + /// Check if it's a float value, and if so, don't try to infer DateTime from it, + /// because it will lead to inferring DateTime instead of simple Float64 in some cases. if (tryReadFloatText(tmp_float, buf) && buf.eof()) - return false; + return nullptr; + + buf.seek(0, SEEK_SET); /// Return position to the beginning + if (!settings.try_infer_datetimes_only_datetime64) + { + time_t tmp; + switch (settings.date_time_input_format) + { + case FormatSettings::DateTimeInputFormat::Basic: + if (tryReadDateTimeText(tmp, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof()) + return std::make_shared(); + break; + case FormatSettings::DateTimeInputFormat::BestEffort: + if (tryParseDateTimeBestEffortStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(); + break; + case FormatSettings::DateTimeInputFormat::BestEffortUS: + if (tryParseDateTimeBestEffortUSStrict(tmp, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(); + break; + } + } buf.seek(0, SEEK_SET); /// Return position to the beginning DateTime64 tmp; switch (settings.date_time_input_format) { case FormatSettings::DateTimeInputFormat::Basic: - if (tryReadDateTime64Text(tmp, 9, buf) && buf.eof()) - return true; + if (tryReadDateTime64Text(tmp, 9, buf, DateLUT::instance(), /*allowed_date_delimiters=*/"-/:", /*allowed_time_delimiters=*/":") && buf.eof()) + return std::make_shared(9); break; case FormatSettings::DateTimeInputFormat::BestEffort: - if (tryParseDateTime64BestEffort(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) - return true; + if (tryParseDateTime64BestEffortStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(9); break; case 
FormatSettings::DateTimeInputFormat::BestEffortUS: - if (tryParseDateTime64BestEffortUS(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC")) && buf.eof()) - return true; + if (tryParseDateTime64BestEffortUSStrict(tmp, 9, buf, DateLUT::instance(), DateLUT::instance("UTC"), /*allowed_date_delimiters=*/"-/:") && buf.eof()) + return std::make_shared(9); break; } - return false; + return nullptr; } template @@ -821,7 +904,6 @@ namespace if (checkIfTypesAreEqual(nested_types_copy)) return std::make_shared(nested_types_copy.back()); - return std::make_shared(nested_types); } else @@ -1176,8 +1258,8 @@ namespace { if constexpr (is_json) { - if (settings.json.allow_object_type) - return std::make_shared("json", true); + if (settings.json.allow_deprecated_object_type) + return std::make_shared("json", true); } /// Empty Map is Map(Nothing, Nothing) @@ -1186,8 +1268,8 @@ namespace if constexpr (is_json) { - if (settings.json.allow_object_type) - return std::make_shared("json", true); + if (settings.json.allow_deprecated_object_type) + return std::make_shared("json", true); if (settings.json.read_objects_as_strings) return std::make_shared(); @@ -1242,7 +1324,7 @@ namespace { if constexpr (is_json) { - if (!settings.json.allow_object_type && settings.json.try_infer_objects_as_tuples) + if (!settings.json.allow_deprecated_object_type && settings.json.try_infer_objects_as_tuples) return tryInferJSONPaths(buf, settings, json_info, depth); } @@ -1262,7 +1344,7 @@ namespace if (checkCharCaseInsensitive('n', buf)) { if (checkStringCaseInsensitive("ull", buf)) - return makeNullable(std::make_shared()); + return std::make_shared(std::make_shared()); else if (checkStringCaseInsensitive("an", buf)) return std::make_shared(); } @@ -1416,6 +1498,15 @@ void transformFinalInferredJSONTypeIfNeededImpl(DataTypePtr & data_type, const F return; } + + if (const auto * variant_type = typeid_cast(data_type.get())) + { + auto nested_types = variant_type->getVariants(); + for (auto & nested_type : nested_types) + transformFinalInferredJSONTypeIfNeededImpl(nested_type, settings, json_info, remain_nothing_types); + data_type = std::make_shared(nested_types); + return; + } } void transformFinalInferredJSONTypeIfNeeded(DataTypePtr & data_type, const FormatSettings & settings, JSONInferenceInfo * json_info) @@ -1439,8 +1530,11 @@ DataTypePtr tryInferDateOrDateTimeFromString(std::string_view field, const Forma if (settings.try_infer_dates && tryInferDate(field)) return std::make_shared(); - if (settings.try_infer_datetimes && tryInferDateTime(field, settings)) - return std::make_shared(9); + if (settings.try_infer_datetimes) + { + if (auto type = tryInferDateTimeOrDateTime64(field, settings)) + return type; + } return nullptr; } @@ -1492,6 +1586,20 @@ DataTypePtr makeNullableRecursively(DataTypePtr type) return nested_type ? std::make_shared(nested_type) : nullptr; } + if (which.isVariant()) + { + const auto * variant_type = assert_cast(type.get()); + DataTypes nested_types; + for (const auto & nested_type: variant_type->getVariants()) + { + if (!nested_type->lowCardinality() && nested_type->haveSubtypes()) + nested_types.push_back(makeNullableRecursively(nested_type)); + else + nested_types.push_back(nested_type); + } + return std::make_shared(nested_types); + } + if (which.isTuple()) { const auto * tuple_type = assert_cast(type.get()); @@ -1525,15 +1633,15 @@ DataTypePtr makeNullableRecursively(DataTypePtr type) return nested_type ? 
std::make_shared(nested_type) : nullptr; } - if (which.isObject()) + if (which.isObjectDeprecated()) { - const auto * object_type = assert_cast(type.get()); + const auto * object_type = assert_cast(type.get()); if (object_type->hasNullableSubcolumns()) return type; - return std::make_shared(object_type->getSchemaFormat(), true); + return std::make_shared(object_type->getSchemaFormat(), true); } - return makeNullable(type); + return makeNullableSafe(type); } NamesAndTypesList getNamesAndRecursivelyNullableTypes(const Block & header) diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0f54898177b..d94f0d90e1b 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -3880,7 +3882,7 @@ private: "Expected tuple with {} subcolumn, but got {} subcolumns", tuple_size, column_tuple.getColumns().size()); - auto res = ColumnObject::create(has_nullable_subcolumns); + auto res = ColumnObjectDeprecated::create(has_nullable_subcolumns); for (size_t i = 0; i < tuple_size; ++i) { ColumnsWithTypeAndName element = {{column_tuple.getColumns()[i], from_types[i], "" }}; @@ -3957,7 +3959,7 @@ private: subcolumn->insertDefault(); } - auto column_object = ColumnObject::create(has_nullable_subcolumns); + auto column_object = ColumnObjectDeprecated::create(has_nullable_subcolumns); for (auto && [key, subcolumn] : subcolumns) { PathInData path(key.toView()); @@ -3968,7 +3970,7 @@ private: }; } - WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_type) const + WrapperType createObjectDeprecatedWrapper(const DataTypePtr & from_type, const DataTypeObjectDeprecated * to_type) const { if (const auto * from_tuple = checkAndGetDataType(from_type.get())) { @@ -3987,12 +3989,12 @@ private: return res; }; } - else if (checkAndGetDataType(from_type.get())) + else if (checkAndGetDataType(from_type.get())) { return [is_nullable = to_type->hasNullableSubcolumns()] (ColumnsWithTypeAndName & arguments, const DataTypePtr & , const ColumnNullable * , size_t) -> ColumnPtr { - const auto & column_object = assert_cast(*arguments.front().column); - auto res = ColumnObject::create(is_nullable); + const auto & column_object = assert_cast(*arguments.front().column); + auto res = ColumnObjectDeprecated::create(is_nullable); for (size_t i = 0; i < column_object.size(); i++) res->insert(column_object[i]); @@ -4005,6 +4007,25 @@ private: "Cast to Object can be performed only from flatten named Tuple, Map or String. Got: {}", from_type->getName()); } + WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_object) const + { + if (checkAndGetDataType(from_type.get())) + { + return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) + { + auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); + res->finalize(); + return res; + }; + } + + /// TODO: support CAST between JSON types with different parameters + /// support CAST from Map to JSON + /// support CAST from Tuple to JSON + /// support CAST from Object('json') to JSON + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String. 
Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); + } + WrapperType createVariantToVariantWrapper(const DataTypeVariant & from_variant, const DataTypeVariant & to_variant) const { /// We support only extension of variant type, so, only new types can be added. @@ -5079,6 +5100,8 @@ private: return createTupleWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::Map: return createMapWrapper(from_type, checkAndGetDataType(to_type.get())); + case TypeIndex::ObjectDeprecated: + return createObjectDeprecatedWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::Object: return createObjectWrapper(from_type, checkAndGetDataType(to_type.get())); case TypeIndex::AggregateFunction: diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index 95c54ac9528..0cf4246fd66 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -77,64 +77,70 @@ namespace impl ColumnPtr key0; ColumnPtr key1; bool is_const; - const ColumnArray::Offsets * offsets{}; + const ColumnArray::Offsets * offsets = nullptr; size_t size() const { assert(key0 && key1); assert(key0->size() == key1->size()); - assert(offsets == nullptr || offsets->size() == key0->size()); - if (offsets != nullptr) + if (offsets != nullptr && !offsets->empty()) return offsets->back(); return key0->size(); } + SipHashKey getKey(size_t i) const { if (is_const) i = 0; + assert(key0->size() == key1->size()); if (offsets != nullptr) { - const auto *const begin = offsets->begin(); + const auto * const begin = offsets->begin(); const auto * upper = std::upper_bound(begin, offsets->end(), i); - if (upper == offsets->end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "offset {} not found in function SipHashKeyColumns::getKey", i); - i = upper - begin; + if (upper != offsets->end()) + i = upper - begin; } const auto & key0data = assert_cast(*key0).getData(); const auto & key1data = assert_cast(*key1).getData(); + assert(key0->size() > i); return {key0data[i], key1data[i]}; } }; static SipHashKeyColumns parseSipHashKeyColumns(const ColumnWithTypeAndName & key) { - const ColumnTuple * tuple = nullptr; - const auto * column = key.column.get(); - bool is_const = false; - if (isColumnConst(*column)) + const auto * col_key = key.column.get(); + + bool is_const; + const ColumnTuple * col_key_tuple; + if (isColumnConst(*col_key)) { is_const = true; - tuple = checkAndGetColumnConstData(column); + col_key_tuple = checkAndGetColumnConstData(col_key); } else - tuple = checkAndGetColumn(column); - if (!tuple) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "key must be a tuple"); - if (tuple->tupleSize() != 2) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "wrong tuple size: key must be a tuple of 2 UInt64"); + { + is_const = false; + col_key_tuple = checkAndGetColumn(col_key); + } - SipHashKeyColumns ret{tuple->getColumnPtr(0), tuple->getColumnPtr(1), is_const}; - assert(ret.key0); - if (!checkColumn(*ret.key0)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "first element of the key tuple is not UInt64"); - assert(ret.key1); - if (!checkColumn(*ret.key1)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "second element of the key tuple is not UInt64"); + if (!col_key_tuple || col_key_tuple->tupleSize() != 2) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The key must be of type Tuple(UInt64, UInt64)"); - if (ret.size() == 1) - ret.is_const = true; + SipHashKeyColumns result{.key0 = col_key_tuple->getColumnPtr(0), .key1 = col_key_tuple->getColumnPtr(1), 
.is_const = is_const}; - return ret; + assert(result.key0); + assert(result.key1); + + if (!checkColumn(*result.key0)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 1st element of the key tuple is not of type UInt64"); + if (!checkColumn(*result.key1)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 2nd element of the key tuple is not of type UInt64"); + + if (result.size() == 1) + result.is_const = true; + + return result; } } diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index 1eaf0d1609a..e6892642d56 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -739,7 +739,7 @@ public: { NumberType value; - if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, error)) + if (!tryGetNumericValueFromJSONElement(value, element, convert_bool_to_integer, /*allow_type_conversion=*/true, error)) return false; auto & col_vec = assert_cast &>(dest); col_vec.insertValue(value); diff --git a/src/Functions/JSONPaths.cpp b/src/Functions/JSONPaths.cpp new file mode 100644 index 00000000000..dfb0386e370 --- /dev/null +++ b/src/Functions/JSONPaths.cpp @@ -0,0 +1,518 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + +enum class PathsMode +{ + ALL_PATHS, + DYNAMIC_PATHS, + SHARED_DATA_PATHS, +}; + +struct JSONAllPathsImpl +{ + static constexpr auto name = "JSONAllPaths"; + static constexpr auto paths_mode = PathsMode::ALL_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONAllPathsWithTypesImpl +{ + static constexpr auto name = "JSONAllPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::ALL_PATHS; + static constexpr auto with_types = true; +}; + +struct JSONDynamicPathsImpl +{ + static constexpr auto name = "JSONDynamicPaths"; + static constexpr auto paths_mode = PathsMode::DYNAMIC_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONDynamicPathsWithTypesImpl +{ + static constexpr auto name = "JSONDynamicPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::DYNAMIC_PATHS; + static constexpr auto with_types = true; +}; + +struct JSONSharedDataPathsImpl +{ + static constexpr auto name = "JSONSharedDataPaths"; + static constexpr auto paths_mode = PathsMode::SHARED_DATA_PATHS; + static constexpr auto with_types = false; +}; + +struct JSONSharedDataPathsWithTypesImpl +{ + static constexpr auto name = "JSONSharedDataPathsWithTypes"; + static constexpr auto paths_mode = PathsMode::SHARED_DATA_PATHS; + static constexpr auto with_types = true; +}; + +/// Implements functions that extracts paths and types from JSON object column. +/// Used for introspection of the content of the JSON object column. 
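+/// All of these functions take a single JSON column argument. The plain variants return Array(String)
+/// with the paths present in each row; the *WithTypes variants return Map(String, String) mapping each
+/// path to the name of its data type in that row (illustrative outputs are shown in the documentation
+/// examples registered below).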
+template +class FunctionJSONPaths : public IFunction +{ +public: + static constexpr auto name = Impl::name; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + std::string getName() const override + { + return name; + } + + size_t getNumberOfArguments() const override { return 1; } + bool useDefaultImplementationForConstants() const override { return true; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + DataTypePtr getReturnTypeImpl(const DataTypes & data_types) const override + { + if (data_types.size() != 1) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires single argument with type JSON", getName()); + + if (data_types[0]->getTypeId() != TypeIndex::Object) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires argument with type JSON, got: {}", getName(),data_types[0]->getName()); + + if constexpr (Impl::with_types) + return std::make_shared(std::make_shared(), std::make_shared()); + return std::make_shared(std::make_shared()); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + { + const ColumnWithTypeAndName & elem = arguments[0]; + const auto * column_object = typeid_cast(elem.column.get()); + if (!column_object) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); + + const auto & type_object = assert_cast(*elem.type); + if constexpr (Impl::with_types) + return executeWithTypes(*column_object, type_object); + return executeWithoutTypes(*column_object); + } + +private: + ColumnPtr executeWithoutTypes(const ColumnObject & column_object) const + { + if constexpr (Impl::paths_mode == PathsMode::SHARED_DATA_PATHS) + { + /// No need to do anything, we already have a column with all sorted paths in shared data. + const auto & shared_data_array = column_object.getSharedDataNestedColumn(); + const auto & shared_data_paths = assert_cast(shared_data_array.getData()).getColumnPtr(0); + return ColumnArray::create(shared_data_paths, shared_data_array.getOffsetsPtr()); + } + + auto res = ColumnArray::create(ColumnString::create()); + auto & offsets = res->getOffsets(); + ColumnString & data = assert_cast(res->getData()); + + if constexpr (Impl::paths_mode == PathsMode::DYNAMIC_PATHS) + { + /// Collect all dynamic paths. + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + std::vector dynamic_paths; + dynamic_paths.reserve(dynamic_path_columns.size()); + for (const auto & [path, _] : dynamic_path_columns) + dynamic_paths.push_back(path); + /// We want the resulting arrays of paths to be sorted for consistency. + std::sort(dynamic_paths.begin(), dynamic_paths.end()); + + size_t size = column_object.size(); + for (size_t i = 0; i != size; ++i) + { + for (const auto path : dynamic_paths) + { + /// Don't include path if it contains NULL, because we consider + /// it to be equivalent to the absence of this path in this row. + if (!dynamic_path_columns.find(path)->second->isNullAt(i)) + data.insertData(path.data(), path.size()); + } + offsets.push_back(data.size()); + } + return res; + } + + /// Collect all paths: typed, dynamic and paths from shared data. 
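+ /// Typed and dynamic paths are sorted once and then merged row by row with the paths from
+ /// shared data, which are already stored sorted, so the resulting array for each row is
+ /// sorted without an extra sort per row.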
+ std::vector sorted_dynamic_and_typed_paths; + const auto & typed_path_columns = column_object.getTypedPaths(); + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + sorted_dynamic_and_typed_paths.reserve(typed_path_columns.size() + dynamic_path_columns.size()); + for (const auto & [path, _] : typed_path_columns) + sorted_dynamic_and_typed_paths.push_back(path); + for (const auto & [path, _] : dynamic_path_columns) + sorted_dynamic_and_typed_paths.push_back(path); + + /// We want the resulting arrays of paths to be sorted for consistency. + std::sort(sorted_dynamic_and_typed_paths.begin(), sorted_dynamic_and_typed_paths.end()); + + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, _] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; + /// Merge sorted list of paths from shared data and sorted_dynamic_and_typed_paths + size_t sorted_paths_index = 0; + for (size_t j = start; j != end; ++j) + { + auto shared_data_path = shared_data_paths->getDataAt(j).toView(); + while (sorted_paths_index != sorted_dynamic_and_typed_paths.size() && sorted_dynamic_and_typed_paths[sorted_paths_index] < shared_data_path) + { + const auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + /// If it's dynamic path include it only if it's not NULL. + if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) + data.insertData(path.data(), path.size()); + ++sorted_paths_index; + } + + data.insertData(shared_data_path.data(), shared_data_path.size()); + } + + for (; sorted_paths_index != sorted_dynamic_and_typed_paths.size(); ++sorted_paths_index) + { + const auto path = sorted_dynamic_and_typed_paths[sorted_paths_index]; + if (auto it = dynamic_path_columns.find(path); it == dynamic_path_columns.end() || !it->second->isNullAt(i)) + data.insertData(path.data(), path.size()); + } + + offsets.push_back(data.size()); + } + + return res; + } + + ColumnPtr executeWithTypes(const ColumnObject & column_object, const DataTypeObject & type_object) const + { + auto offsets_column = ColumnArray::ColumnOffsets::create(); + auto & offsets = offsets_column->getData(); + auto paths_column = ColumnString::create(); + auto types_column = ColumnString::create(); + + if constexpr (Impl::paths_mode == PathsMode::DYNAMIC_PATHS) + { + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + std::vector sorted_dynamic_paths; + sorted_dynamic_paths.reserve(dynamic_path_columns.size()); + for (const auto & [path, _] : dynamic_path_columns) + sorted_dynamic_paths.push_back(path); + /// We want the resulting arrays of paths and values to be sorted for consistency. + std::sort(sorted_dynamic_paths.begin(), sorted_dynamic_paths.end()); + + /// Iterate over all rows and extract types from dynamic columns. 
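+ /// A dynamic path whose value is NULL in a row is skipped for that row:
+ /// NULL is treated as the absence of the path.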
+ for (size_t i = 0; i != column_object.size(); ++i) + { + for (const auto path : sorted_dynamic_paths) + { + const auto & column = dynamic_path_columns.find(path)->second; + if (!column->isNullAt(i)) + { + auto type = getDynamicValueType(column, i); + paths_column->insertData(path.data(), path.size()); + types_column->insertData(type.data(), type.size()); + } + } + + offsets.push_back(paths_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + if constexpr (Impl::paths_mode == PathsMode::SHARED_DATA_PATHS) + { + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + /// Iterate over all rows and extract types from dynamic values in shared data. + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; + for (size_t j = start; j != end; ++j) + { + if (auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j))) + { + paths_column->insertFrom(*shared_data_paths, j); + types_column->insertData(type_name->data(), type_name->size()); + } + } + + offsets.push_back(paths_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + /// Iterate over all rows and extract types from dynamic columns from dynamic paths and from values in shared data. + std::vector> sorted_typed_and_dynamic_paths_with_types; + const auto & typed_path_types = type_object.getTypedPaths(); + const auto & dynamic_path_columns = column_object.getDynamicPaths(); + sorted_typed_and_dynamic_paths_with_types.reserve(typed_path_types.size() + dynamic_path_columns.size()); + for (const auto & [path, type] : typed_path_types) + sorted_typed_and_dynamic_paths_with_types.emplace_back(path, type->getName()); + for (const auto & [path, _] : dynamic_path_columns) + sorted_typed_and_dynamic_paths_with_types.emplace_back(path, ""); + + /// We want the resulting arrays of paths and values to be sorted for consistency. + std::sort(sorted_typed_and_dynamic_paths_with_types.begin(), sorted_typed_and_dynamic_paths_with_types.end()); + + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + for (size_t i = 0; i != shared_data_offsets.size(); ++i) + { + size_t start = shared_data_offsets[static_cast(i) - 1]; + size_t end = shared_data_offsets[static_cast(i)]; + /// Merge sorted list of paths and values from shared data and sorted_typed_and_dynamic_paths_with_types + size_t sorted_paths_index = 0; + for (size_t j = start; j != end; ++j) + { + auto shared_data_path = shared_data_paths->getDataAt(j).toView(); + auto type_name = getDynamicValueTypeFromSharedData(shared_data_values->getDataAt(j)); + /// Skip NULL values. + if (!type_name) + continue; + + while (sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size() && sorted_typed_and_dynamic_paths_with_types[sorted_paths_index].first < shared_data_path) + { + auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; + /// Update type for path from dynamic paths. 
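+ /// (Typed paths already carry the type name precomputed from the column type;
+ /// dynamic paths are resolved per row because their type can differ between rows.)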
+ if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + { + /// Skip NULL values. + if (it->second->isNullAt(i)) + { + ++sorted_paths_index; + continue; + } + type = getDynamicValueType(it->second, i); + } + paths_column->insertData(path.data(), path.size()); + types_column->insertData(type.data(), type.size()); + ++sorted_paths_index; + } + + paths_column->insertData(shared_data_path.data(), shared_data_path.size()); + types_column->insertData(type_name->data(), type_name->size()); + } + + for (; sorted_paths_index != sorted_typed_and_dynamic_paths_with_types.size(); ++sorted_paths_index) + { + auto & [path, type] = sorted_typed_and_dynamic_paths_with_types[sorted_paths_index]; + if (auto it = dynamic_path_columns.find(path); it != dynamic_path_columns.end()) + { + /// Skip NULL values. + if (it->second->isNullAt(i)) + continue; + type = getDynamicValueType(it->second, i); + } + paths_column->insertData(path.data(), path.size()); + types_column->insertData(type.data(), type.size()); + } + + offsets.push_back(paths_column->size()); + } + + return ColumnMap::create(ColumnPtr(std::move(paths_column)), ColumnPtr(std::move(types_column)), ColumnPtr(std::move(offsets_column))); + } + + String getDynamicValueType(const ColumnPtr & column, size_t i) const + { + const ColumnDynamic * dynamic_column = checkAndGetColumn(column.get()); + const auto & variant_info = dynamic_column->getVariantInfo(); + const auto & variant_column = dynamic_column->getVariantColumn(); + auto global_discr = variant_column.globalDiscriminatorAt(i); + /// We don't output path with NULL values. It should be checked before calling getDynamicValueType. + chassert(global_discr != ColumnVariant::NULL_DISCRIMINATOR); + if (global_discr == dynamic_column->getSharedVariantDiscriminator()) + { + auto value = dynamic_column->getSharedVariant().getDataAt(variant_column.offsetAt(i)); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + return type->getName(); + } + + return variant_info.variant_names[global_discr]; + } + + std::optional getDynamicValueTypeFromSharedData(StringRef value) const + { + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + if (isNothing(type)) + return std::nullopt; + return type->getName(); + } +}; + +} + +REGISTER_FUNCTION(JSONPaths) +{ + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of all paths stored in each row in JSON column. +)", + .syntax = {"JSONAllPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONAllPaths(json)─┐ +│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a','c'] │ +└──────────────────────────────────────┴────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of all paths and their data types stored in each row in JSON column. 
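+The type of a dynamic path is reported per row, so the same path may have different types in different rows.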
+)", + .syntax = {"JSONAllPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONAllPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONAllPathsWithTypes(json)───────────────┐ +│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {'b':'String'} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))','c':'Date'} │ +└──────────────────────────────────────┴───────────────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of dynamic paths that are stored as separate subcolumns in JSON column. +)", + .syntax = {"JSONDynamicPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPaths(json)─┐ +│ {"a":"42"} │ ['a'] │ +│ {"b":"Hello"} │ [] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['a'] │ +└──────────────────────────────────────┴────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of dynamic paths that are stored as separate subcolumns and their types in each row in JSON column. +)", + .syntax = {"JSONDynamicPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┐ +│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of paths that are stored in shared data structure in JSON column. +)", + .syntax = {"JSONDynamicPaths(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONSharedDataPaths(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONSharedDataPaths(json)─┐ +│ {"a":"42"} │ [] │ +│ {"b":"Hello"} │ ['b'] │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ ['c'] │ +└──────────────────────────────────────┴───────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); + + factory.registerFunction>(FunctionDocumentation{ + .description = R"( +Returns the list of paths that are stored in shared data structure and their types in each row in JSON column. 
+)", + .syntax = {"JSONDynamicPathsWithTypes(json)"}, + .arguments = {{"json", "JSON column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (json JSON(max_dynamic_paths=1)) ENGINE = Memory; +INSERT INTO test FORMAT JSONEachRow {"json" : {"a" : 42}}, {"json" : {"b" : "Hello"}}, {"json" : {"a" : [1, 2, 3], "c" : "2020-01-01"}} +SELECT json, JSONDynamicPathsWithTypes(json) FROM test; +)", + R"( +┌─json─────────────────────────────────┬─JSONDynamicPathsWithTypes(json)─┐ +│ {"a":"42"} │ {'a':'Int64'} │ +│ {"b":"Hello"} │ {} │ +│ {"a":["1","2","3"],"c":"2020-01-01"} │ {'a':'Array(Nullable(Int64))'} │ +└──────────────────────────────────────┴─────────────────────────────────┘ +)"}}}, + .categories{"JSON"}, + }); +} + +} diff --git a/src/Functions/LowerUpperImpl.h b/src/Functions/LowerUpperImpl.h index d463ef96e16..a52703d10c8 100644 --- a/src/Functions/LowerUpperImpl.h +++ b/src/Functions/LowerUpperImpl.h @@ -1,7 +1,6 @@ #pragma once #include - namespace DB { diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index eedabca5b22..5da085f48e5 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -1,15 +1,14 @@ #pragma once + +#include "config.h" + +#if USE_ICU + #include #include -#include -#include +#include +#include #include -#include - -#ifdef __SSE2__ -#include -#endif - namespace DB { @@ -19,71 +18,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -/// xor or do nothing -template -UInt8 xor_or_identity(const UInt8 c, const int mask) -{ - return c ^ mask; -} - -template <> -inline UInt8 xor_or_identity(const UInt8 c, const int) -{ - return c; -} - -/// It is caller's responsibility to ensure the presence of a valid cyrillic sequence in array -template -inline void UTF8CyrillicToCase(const UInt8 *& src, UInt8 *& dst) -{ - if (src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0x8Fu)) - { - /// ЀЁЂЃЄЅІЇЈЉЊЋЌЍЎЏ - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x10); - } - else if (src[0] == 0xD1u && (src[1] >= 0x90u && src[1] <= 0x9Fu)) - { - /// ѐёђѓєѕіїјљњћќѝўџ - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x10); - } - else if (src[0] == 0xD0u && (src[1] >= 0x90u && src[1] <= 0x9Fu)) - { - /// А-П - *dst++ = *src++; - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD0u && (src[1] >= 0xB0u && src[1] <= 0xBFu)) - { - /// а-п - *dst++ = *src++; - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD0u && (src[1] >= 0xA0u && src[1] <= 0xAFu)) - { - /// Р-Я - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x20); - } - else if (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x8Fu)) - { - /// р-я - *dst++ = xor_or_identity(*src++, 0x1); - *dst++ = xor_or_identity(*src++, 0x20); - } -} - - -/** If the string contains UTF-8 encoded text, convert it to the lower (upper) case. - * Note: It is assumed that after the character is converted to another case, - * the length of its multibyte sequence in UTF-8 does not change. - * Otherwise, the behavior is undefined. 
- */ -template +template struct LowerUpperUTF8Impl { static void vector( @@ -103,180 +38,46 @@ struct LowerUpperUTF8Impl return; } - res_data.resize_exact(data.size()); - res_offsets.assign(offsets); - array(data.data(), data.data() + data.size(), offsets, res_data.data()); + res_data.resize(data.size()); + res_offsets.resize_exact(offsets.size()); + + String output; + size_t curr_offset = 0; + for (size_t i = 0; i < offsets.size(); ++i) + { + const auto * data_start = reinterpret_cast(&data[offsets[i - 1]]); + size_t size = offsets[i] - offsets[i - 1]; + + icu::UnicodeString input(data_start, static_cast(size), "UTF-8"); + if constexpr (upper) + input.toUpper(); + else + input.toLower(); + + output.clear(); + input.toUTF8String(output); + + /// For valid UTF-8 input strings, ICU sometimes produces output with extra '\0's at the end. Only the data before the first + /// '\0' is valid. It the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this + /// case, the behavior is also reasonable. + const char * res_end = find_last_not_symbols_or_null<'\0'>(output.data(), output.data() + output.size()); + size_t valid_size = res_end ? res_end - output.data() + 1 : 0; + + res_data.resize(curr_offset + valid_size + 1); + memcpy(&res_data[curr_offset], output.data(), valid_size); + res_data[curr_offset + valid_size] = 0; + + curr_offset += valid_size + 1; + res_offsets[i] = curr_offset; + } } static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Functions lowerUTF8 and upperUTF8 cannot work with FixedString argument"); } - - /** Converts a single code point starting at `src` to desired case, storing result starting at `dst`. - * `src` and `dst` are incremented by corresponding sequence lengths. */ - static bool toCase(const UInt8 *& src, const UInt8 * src_end, UInt8 *& dst, bool partial) - { - if (src[0] <= ascii_upper_bound) - { - if (*src >= not_case_lower_bound && *src <= not_case_upper_bound) - *dst++ = *src++ ^ flip_case_mask; - else - *dst++ = *src++; - } - else if (src + 1 < src_end - && ((src[0] == 0xD0u && (src[1] >= 0x80u && src[1] <= 0xBFu)) || (src[0] == 0xD1u && (src[1] >= 0x80u && src[1] <= 0x9Fu)))) - { - cyrillic_to_case(src, dst); - } - else if (src + 1 < src_end && src[0] == 0xC2u) - { - /// Punctuation U+0080 - U+00BF, UTF-8: C2 80 - C2 BF - *dst++ = *src++; - *dst++ = *src++; - } - else if (src + 2 < src_end && src[0] == 0xE2u) - { - /// Characters U+2000 - U+2FFF, UTF-8: E2 80 80 - E2 BF BF - *dst++ = *src++; - *dst++ = *src++; - *dst++ = *src++; - } - else - { - size_t src_sequence_length = UTF8::seqLength(*src); - /// In case partial buffer was passed (due to SSE optimization) - /// we cannot convert it with current src_end, but we may have more - /// bytes to convert and eventually got correct symbol. - if (partial && src_sequence_length > static_cast(src_end - src)) - return false; - - auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src); - if (src_code_point) - { - int dst_code_point = to_case(*src_code_point); - if (dst_code_point > 0) - { - size_t dst_sequence_length = UTF8::convertCodePointToUTF8(dst_code_point, dst, src_end - src); - assert(dst_sequence_length <= 4); - - /// We don't support cases when lowercase and uppercase characters occupy different number of bytes in UTF-8. - /// As an example, this happens for ß and ẞ. 
- if (dst_sequence_length == src_sequence_length) - { - src += dst_sequence_length; - dst += dst_sequence_length; - return true; - } - } - } - - *dst = *src; - ++dst; - ++src; - } - - return true; - } - -private: - static constexpr auto ascii_upper_bound = '\x7f'; - static constexpr auto flip_case_mask = 'A' ^ 'a'; - - static void array(const UInt8 * src, const UInt8 * src_end, const ColumnString::Offsets & offsets, UInt8 * dst) - { - const auto * offset_it = offsets.begin(); - const UInt8 * begin = src; - -#ifdef __SSE2__ - static constexpr auto bytes_sse = sizeof(__m128i); - - /// If we are before this position, we can still read at least bytes_sse. - const auto * src_end_sse = src_end - bytes_sse + 1; - - /// SSE2 packed comparison operate on signed types, hence compare (c < 0) instead of (c > 0x7f) - const auto v_zero = _mm_setzero_si128(); - const auto v_not_case_lower_bound = _mm_set1_epi8(not_case_lower_bound - 1); - const auto v_not_case_upper_bound = _mm_set1_epi8(not_case_upper_bound + 1); - const auto v_flip_case_mask = _mm_set1_epi8(flip_case_mask); - - while (src < src_end_sse) - { - const auto chars = _mm_loadu_si128(reinterpret_cast(src)); - - /// check for ASCII - const auto is_not_ascii = _mm_cmplt_epi8(chars, v_zero); - const auto mask_is_not_ascii = _mm_movemask_epi8(is_not_ascii); - - /// ASCII - if (mask_is_not_ascii == 0) - { - const auto is_not_case - = _mm_and_si128(_mm_cmpgt_epi8(chars, v_not_case_lower_bound), _mm_cmplt_epi8(chars, v_not_case_upper_bound)); - const auto mask_is_not_case = _mm_movemask_epi8(is_not_case); - - /// everything in correct case ASCII - if (mask_is_not_case == 0) - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), chars); - else - { - /// ASCII in mixed case - /// keep `flip_case_mask` only where necessary, zero out elsewhere - const auto xor_mask = _mm_and_si128(v_flip_case_mask, is_not_case); - - /// flip case by applying calculated mask - const auto cased_chars = _mm_xor_si128(chars, xor_mask); - - /// store result back to destination - _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), cased_chars); - } - - src += bytes_sse; - dst += bytes_sse; - } - else - { - /// UTF-8 - - /// Find the offset of the next string after src - size_t offset_from_begin = src - begin; - while (offset_from_begin >= *offset_it) - ++offset_it; - - /// Do not allow one row influence another (since row may have invalid sequence, and break the next) - const UInt8 * row_end = begin + *offset_it; - chassert(row_end >= src); - const UInt8 * expected_end = std::min(src + bytes_sse, row_end); - - while (src < expected_end) - { - if (!toCase(src, expected_end, dst, /* partial= */ true)) - { - /// Fallback to handling byte by byte. 
- src_end_sse = src; - break; - } - } - } - } - - /// Find the offset of the next string after src - size_t offset_from_begin = src - begin; - while (offset_it != offsets.end() && offset_from_begin >= *offset_it) - ++offset_it; -#endif - - /// handle remaining symbols, row by row (to avoid influence of bad UTF8 symbols from one row, to another) - while (src < src_end) - { - const UInt8 * row_end = begin + *offset_it; - chassert(row_end >= src); - - while (src < row_end) - toCase(src, row_end, dst, /* partial= */ false); - ++offset_it; - } - } }; } + +#endif diff --git a/src/Functions/empty.cpp b/src/Functions/empty.cpp index 51811d21a0c..ddb503668cf 100644 --- a/src/Functions/empty.cpp +++ b/src/Functions/empty.cpp @@ -2,10 +2,18 @@ #include #include #include +#include namespace DB { + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; +} + namespace { @@ -13,13 +21,135 @@ struct NameEmpty { static constexpr auto name = "empty"; }; + using FunctionEmpty = FunctionStringOrArrayToT, NameEmpty, UInt8, false>; +/// Implements the empty function for JSON type. +class ExecutableFunctionJSONEmpty : public IExecutableFunction +{ +public: + std::string getName() const override { return NameEmpty::name; } + +private: + bool useDefaultImplementationForConstants() const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override + { + const ColumnWithTypeAndName & elem = arguments[0]; + const auto * object_column = typeid_cast(elem.column.get()); + if (!object_column) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected column type in function {}. Expected Object column, got {}", getName(), elem.column->getName()); + + auto res = DataTypeUInt8().createColumn(); + auto & data = typeid_cast(*res).getData(); + const auto & typed_paths = object_column->getTypedPaths(); + size_t size = object_column->size(); + /// If object column has at least 1 typed path, it will never be empty, because these paths always have values. + if (!typed_paths.empty()) + { + data.resize_fill(size, 0); + return res; + } + + const auto & dynamic_paths = object_column->getDynamicPaths(); + const auto & shared_data = object_column->getSharedDataPtr(); + data.reserve(size); + for (size_t i = 0; i != size; ++i) + { + bool empty = true; + /// Check if there is no paths in shared data. + if (!shared_data->isDefaultAt(i)) + { + empty = false; + } + /// Check that all dynamic paths have NULL value in this row. 
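+ /// (A NULL value in a dynamic path is treated as the absence of the path, consistent with how values are inserted into the JSON column.)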
+ else + { + for (const auto & [path, column] : dynamic_paths) + { + if (!column->isNullAt(i)) + { + empty = false; + break; + } + } + } + + data.push_back(empty); + } + + return res; + } +}; + +class FunctionEmptyJSON final : public IFunctionBase +{ +public: + FunctionEmptyJSON(const DataTypes & argument_types_, const DataTypePtr & return_type_) : argument_types(argument_types_), return_type(return_type_) {} + + String getName() const override { return NameEmpty::name; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } + + const DataTypes & getArgumentTypes() const override { return argument_types; } + const DataTypePtr & getResultType() const override { return return_type; } + + ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override + { + return std::make_unique(); + } + +private: + DataTypes argument_types; + DataTypePtr return_type; +}; + +class FunctionEmptyOverloadResolver final : public IFunctionOverloadResolver +{ +public: + static constexpr auto name = NameEmpty::name; + + static FunctionOverloadResolverPtr create(ContextPtr) + { + return std::make_unique(); + } + + String getName() const override { return NameEmpty::name; } + size_t getNumberOfArguments() const override { return 1; } + + FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override + { + DataTypes argument_types; + argument_types.reserve(arguments.size()); + for (const auto & arg : arguments) + argument_types.push_back(arg.type); + + if (argument_types.size() == 1 && isObject(argument_types[0])) + return std::make_shared(argument_types, return_type); + + return std::make_shared(std::make_shared(), argument_types, return_type); + } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (!isStringOrFixedString(arguments[0]) + && !isArray(arguments[0]) + && !isMap(arguments[0]) + && !isUUID(arguments[0]) + && !isIPv6(arguments[0]) + && !isIPv4(arguments[0]) + && !isObject(arguments[0])) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); + + return std::make_shared(); + } +}; + } REGISTER_FUNCTION(Empty) { - factory.registerFunction(); + factory.registerFunction(); } } diff --git a/src/Functions/formatQuery.cpp b/src/Functions/formatQuery.cpp index 9591ea95254..be633bdfe37 100644 --- a/src/Functions/formatQuery.cpp +++ b/src/Functions/formatQuery.cpp @@ -43,6 +43,7 @@ public: max_query_size = settings.max_query_size; max_parser_depth = settings.max_parser_depth; max_parser_backtracks = settings.max_parser_backtracks; + print_pretty_type_names = settings.print_pretty_type_names; } String getName() const override { return name; } @@ -138,7 +139,11 @@ private: } } - formatAST(*ast, buf, /*hilite*/ false, /*single_line*/ output_formatting == OutputFormatting::SingleLine); + IAST::FormatSettings settings(buf, output_formatting == OutputFormatting::SingleLine, /*hilite*/ false); + settings.show_secrets = true; + settings.print_pretty_type_names = print_pretty_type_names; + ast->format(settings); + auto formatted = buf.stringView(); const size_t res_data_new_size = res_data_size + formatted.size() + 1; @@ -165,6 +170,7 @@ private: size_t max_query_size; size_t max_parser_depth; size_t max_parser_backtracks; + bool print_pretty_type_names; }; } diff --git a/src/Functions/initcapUTF8.cpp b/src/Functions/initcapUTF8.cpp index 282d846094e..004586dce26 
100644 --- a/src/Functions/initcapUTF8.cpp +++ b/src/Functions/initcapUTF8.cpp @@ -1,9 +1,8 @@ #include #include -#include #include #include - +#include namespace DB { diff --git a/src/Functions/lowerUTF8.cpp b/src/Functions/lowerUTF8.cpp index 7adb0069121..e2f7cb84730 100644 --- a/src/Functions/lowerUTF8.cpp +++ b/src/Functions/lowerUTF8.cpp @@ -1,9 +1,10 @@ -#include +#include "config.h" + +#if USE_ICU + +#include #include #include -#include -#include - namespace DB { @@ -15,13 +16,25 @@ struct NameLowerUTF8 static constexpr auto name = "lowerUTF8"; }; -using FunctionLowerUTF8 = FunctionStringToString>, NameLowerUTF8>; +using FunctionLowerUTF8 = FunctionStringToString, NameLowerUTF8>; } REGISTER_FUNCTION(LowerUTF8) { - factory.registerFunction(); + FunctionDocumentation::Description description + = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.)"; + FunctionDocumentation::Syntax syntax = "lowerUTF8(input)"; + FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}}; + FunctionDocumentation::ReturnedValue returned_value = "A String data type value"; + FunctionDocumentation::Examples examples = { + {"first", "SELECT lowerUTF8('München') as Lowerutf8;", "münchen"}, + }; + FunctionDocumentation::Categories categories = {"String"}; + + factory.registerFunction({description, syntax, arguments, returned_value, examples, categories}); } } + +#endif diff --git a/src/Functions/upperUTF8.cpp b/src/Functions/upperUTF8.cpp index 659e67f0ef3..ef26430331f 100644 --- a/src/Functions/upperUTF8.cpp +++ b/src/Functions/upperUTF8.cpp @@ -1,8 +1,10 @@ +#include "config.h" + +#if USE_ICU + +#include #include #include -#include -#include - namespace DB { @@ -14,13 +16,25 @@ struct NameUpperUTF8 static constexpr auto name = "upperUTF8"; }; -using FunctionUpperUTF8 = FunctionStringToString>, NameUpperUTF8>; +using FunctionUpperUTF8 = FunctionStringToString, NameUpperUTF8>; } REGISTER_FUNCTION(UpperUTF8) { - factory.registerFunction(); + FunctionDocumentation::Description description + = R"(Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. 
If this assumption is violated, no exception is thrown and the result is undefined.)"; + FunctionDocumentation::Syntax syntax = "upperUTF8(input)"; + FunctionDocumentation::Arguments arguments = {{"input", "Input with String type"}}; + FunctionDocumentation::ReturnedValue returned_value = "A String data type value"; + FunctionDocumentation::Examples examples = { + {"first", "SELECT upperUTF8('München') as Upperutf8;", "MÜNCHEN"}, + }; + FunctionDocumentation::Categories categories = {"String"}; + + factory.registerFunction({description, syntax, arguments, returned_value, examples, categories}); } } + +#endif diff --git a/src/IO/NetUtils.h b/src/IO/NetUtils.h new file mode 100644 index 00000000000..12f09524ae7 --- /dev/null +++ b/src/IO/NetUtils.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +template +constexpr T netToHost(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T hostToNet(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T toLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T toBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +} diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 26cdee4140c..bbf9f96404f 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -bool ReadBufferFromPocoSocket::nextImpl() +ssize_t ReadBufferFromPocoSocketBase::socketReceiveBytesImpl(char * ptr, size_t size) { ssize_t bytes_read = 0; Stopwatch watch; @@ -43,14 +43,11 @@ bool ReadBufferFromPocoSocket::nextImpl() ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); }); + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); + /// Add more details to exceptions. try { - CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); - - if (internal_buffer.size() > INT_MAX) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); - /// If async_callback is specified, set socket to non-blocking mode /// and try to read data from it, if socket is not ready for reading, /// run async_callback and try again later. @@ -61,7 +58,7 @@ bool ReadBufferFromPocoSocket::nextImpl() socket.setBlocking(false); SCOPE_EXIT(socket.setBlocking(true)); bool secure = socket.secure(); - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); /// Check EAGAIN and ERR_SSL_WANT_READ/ERR_SSL_WANT_WRITE for secure socket (reading from secure socket can write too). 
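For readers unfamiliar with the retry dance performed here, the following is a minimal POSIX-level sketch of the same idea: read from a non-blocking socket and, whenever the call reports EAGAIN, wait until the descriptor is readable and try again. It uses plain recv(2)/poll(2) rather than the Poco socket and async_callback used in this file, so it is illustrative only.

#include <cerrno>
#include <cstddef>
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

// Sketch: read up to `size` bytes from a non-blocking socket, retrying on EAGAIN.
// The poll(2) call stands in for the async_callback / timeout handling above.
ssize_t receiveWithRetry(int fd, char * ptr, size_t size, int timeout_ms)
{
    ssize_t bytes_read = recv(fd, ptr, size, 0);
    while (bytes_read < 0 && errno == EAGAIN)
    {
        pollfd pfd{fd, POLLIN, 0};
        if (poll(&pfd, 1, timeout_ms) <= 0)   // timeout or poll error: report failure
            return -1;
        bytes_read = recv(fd, ptr, size, 0);
    }
    return bytes_read;   // < 0 is a real error, 0 means the peer closed the connection
}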
while (bytes_read < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(bytes_read) || checkSSLWantWrite(bytes_read))))) @@ -73,12 +70,12 @@ bool ReadBufferFromPocoSocket::nextImpl() async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); /// Try to read again. - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } else { - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } catch (const Poco::Net::NetException & e) @@ -99,6 +96,16 @@ bool ReadBufferFromPocoSocket::nextImpl() if (bytes_read < 0) throw NetException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from socket (peer: {}, local: {})", peer_address.toString(), socket.address().toString()); + return bytes_read; +} + +bool ReadBufferFromPocoSocketBase::nextImpl() +{ + if (internal_buffer.size() > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + + ssize_t bytes_read = socketReceiveBytesImpl(internal_buffer.begin(), internal_buffer.size()); + if (read_event != ProfileEvents::end()) ProfileEvents::increment(read_event, bytes_read); @@ -110,7 +117,7 @@ bool ReadBufferFromPocoSocket::nextImpl() return true; } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size) : BufferWithOwnMemory(buf_size) , socket(socket_) , peer_address(socket.peerAddress()) @@ -119,19 +126,22 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, { } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBufferFromPocoSocket(socket_, buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocketBase(socket_, buf_size) { read_event = read_event_; } -bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) const +bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const { - if (available()) + /// For secure socket it is important to check if any remaining data available in underlying decryption buffer - + /// read always retrieves the whole encrypted frame from the wire and puts it into underlying buffer while returning only requested size - + /// further poll() can block though there is still data to read in the underlying decryption buffer. + if (available() || socket.impl()->available()) return true; Stopwatch watch; - bool res = socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); + bool res = socket.impl()->poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); return res; } diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index 76156612764..912388adaac 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -9,7 +9,7 @@ namespace DB { /// Works with the ready Poco::Net::Socket. 
Blocking operations. -class ReadBufferFromPocoSocket : public BufferWithOwnMemory +class ReadBufferFromPocoSocketBase : public BufferWithOwnMemory { protected: Poco::Net::Socket & socket; @@ -25,16 +25,29 @@ protected: bool nextImpl() override; public: - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); bool poll(size_t timeout_microseconds) const; void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); } + ssize_t socketReceiveBytesImpl(char * ptr, size_t size); + private: AsyncCallback async_callback; std::string socket_description; }; +class ReadBufferFromPocoSocket : public ReadBufferFromPocoSocketBase +{ +public: + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, buf_size) + {} + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size) + {} +}; + } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..4a1e3732a55 --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -0,0 +1,166 @@ +#include +#include +#include + + +namespace DB::ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace DB +{ + +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : ReadBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocketBase( + socket_, read_event_, + std::min(buf_size, static_cast(std::numeric_limits::max()))), + our_address(socket_.address()), log(getLogger("Protocol")) +{} + +void ReadBufferFromPocoSocketChunked::enableChunked() +{ + if (chunked) + return; + chunked = 1; + data_end = buffer().end(); + /// Resize working buffer so any next read will call nextImpl + working_buffer.resize(offset()); + chunk_left = 0; + next_chunk = 0; +} + +bool ReadBufferFromPocoSocketChunked::hasBufferedData() const +{ + if (available()) + return true; + + return chunked && (static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)); +} + +bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const +{ + if (chunked) + if (available() || static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)) + return true; + + return ReadBufferFromPocoSocketBase::poll(timeout_microseconds); +} + + +bool ReadBufferFromPocoSocketChunked::loadNextChunk(Position c_pos, bool cont) +{ + auto buffered = std::min(static_cast(data_end - c_pos), sizeof(next_chunk)); + + if (buffered) + std::memcpy(&next_chunk, c_pos, buffered); + if (buffered < sizeof(next_chunk)) + if (socketReceiveBytesImpl(reinterpret_cast(&next_chunk) + buffered, sizeof(next_chunk) - buffered) < 
static_cast(sizeof(next_chunk) - buffered)) + return false; + next_chunk = fromLittleEndian(next_chunk); + + if (next_chunk) + { + if (cont) + LOG_TEST(log, "{} <- {} Chunk receive continued. Size {}", ourAddress().toString(), peerAddress().toString(), next_chunk); + } + else + LOG_TEST(log, "{} <- {} Chunk receive ended.", ourAddress().toString(), peerAddress().toString()); + + return true; +} + +bool ReadBufferFromPocoSocketChunked::processChunkLeft(Position c_pos) +{ + if (data_end - c_pos < chunk_left) + { + working_buffer.resize(data_end - buffer().begin()); + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + chunk_left -= (data_end - c_pos); + return true; + } + + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + working_buffer.resize(nextimpl_working_buffer_offset + chunk_left); + + c_pos += chunk_left; + + if (!loadNextChunk(c_pos, true)) + return false; + + chunk_left = 0; + return true; +} + + +bool ReadBufferFromPocoSocketChunked::nextImpl() +{ + if (!chunked) + return ReadBufferFromPocoSocketBase::nextImpl(); + + auto * c_pos = pos; + + if (chunk_left == 0) + { + if (next_chunk == 0) + { + if (chunked == 1) + chunked = 2; // first chunked block - no end marker + else + c_pos = pos + sizeof(next_chunk); // bypass chunk end marker + + if (c_pos > data_end) + c_pos = data_end; + + if (!loadNextChunk(c_pos)) + return false; + + chunk_left = next_chunk; + next_chunk = 0; + + if (chunk_left == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + + c_pos += sizeof(next_chunk); + + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + LOG_TEST(log, "{} <- {} Chunk receive started. Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast(*c_pos), chunk_left); + } + else + { + c_pos += sizeof(next_chunk); + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + chunk_left = next_chunk; + next_chunk = 0; + } + } + else + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + return processChunkLeft(c_pos); +} + +} diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..8bc4024b978 --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -0,0 +1,109 @@ +#pragma once + +#include +#include + +/* + +Handshake +============= + | 'Hello' type + | handshake exchange + | chunked protocol negotiation + +============= + + +Basic chunk: + +============= +Chunk begins | 0x12345678 chunk size, 4 bytes little endian + +------------- + | Packet type always follows beginning of the chunk + | packet data + +------------- +Chunk ends | 0x00000000 4 zero bytes + +============= + + +Datastream chunk: + +============= +Chunk begins | 0x12345678 + +------------- + | Packet type + | packet data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of packets... ..... 
+ +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + + +Multipart chunk: + +============= +Chunk begins | 0x12345678 chunk part size, 4 bytes little endian + +------------- + | Packet type + | packet data + +------------- + | Packet type + | (partial) packet data + +============= +Chunk continues | 0x12345678 chunk next part size, 4 bytes little endian + +============= + | possibly previous packet's data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of chunk parts... ..... + +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + +*/ + +namespace DB +{ + +class ReadBufferFromPocoSocketChunked: public ReadBufferFromPocoSocketBase +{ +public: + using ReadBufferFromPocoSocketBase::setAsyncCallback; + + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + + void enableChunked(); + + bool hasBufferedData() const; + + bool poll(size_t timeout_microseconds) const; + + Poco::Net::SocketAddress peerAddress() { return peer_address; } + Poco::Net::SocketAddress ourAddress() { return our_address; } + +protected: + bool loadNextChunk(Position c_pos, bool cont = false); + bool processChunkLeft(Position c_pos); + bool nextImpl() override; + + Poco::Net::SocketAddress our_address; + +private: + LoggerPtr log; + Position data_end = nullptr; // end position of data in the internal_buffer + UInt32 chunk_left = 0; // chunk left to read from socket + UInt32 next_chunk = 0; // size of the next cnunk + UInt8 chunked = 0; // 0 - disabled; 1 - started; 2 - enabled; +}; + +} diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index 9559462e62b..a38dc1ecefb 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -855,6 +856,12 @@ void readBackQuotedString(String & s, ReadBuffer & buf) readBackQuotedStringInto(s, buf); } +bool tryReadBackQuotedString(String & s, ReadBuffer & buf) +{ + s.clear(); + return readAnyQuotedStringInto<'`', false, String, bool>(s, buf); +} + void readBackQuotedStringWithSQLStyle(String & s, ReadBuffer & buf) { s.clear(); @@ -1270,8 +1277,83 @@ ReturnType readJSONArrayInto(Vector & s, ReadBuffer & buf) template void readJSONArrayInto, void>(PaddedPODArray & s, ReadBuffer & buf); template bool readJSONArrayInto, bool>(PaddedPODArray & s, ReadBuffer & buf); +std::string_view readJSONObjectAsViewPossiblyInvalid(ReadBuffer & buf, String & object_buffer) +{ + if (buf.eof() || *buf.position() != '{') + throw Exception(ErrorCodes::INCORRECT_DATA, "JSON object should start with '{{'"); + + char * start = buf.position(); + bool use_object_buffer = false; + object_buffer.clear(); + + ++buf.position(); + Int64 balance = 1; + bool quotes = false; + + while (true) + { + if (!buf.hasPendingData() && !use_object_buffer) + { + use_object_buffer = true; + object_buffer.append(start, buf.position() - start); + } + + if (buf.eof()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected EOF while reading JSON object"); + + char * next_pos = find_first_symbols<'\\', '{', '}', '"'>(buf.position(), buf.buffer().end()); + if (use_object_buffer) + object_buffer.append(buf.position(), next_pos - buf.position()); + buf.position() = next_pos; + + if 
(!buf.hasPendingData()) + continue; + + if (use_object_buffer) + object_buffer.push_back(*buf.position()); + + if (*buf.position() == '\\') + { + ++buf.position(); + if (!buf.hasPendingData() && !use_object_buffer) + { + use_object_buffer = true; + object_buffer.append(start, buf.position() - start); + } + + if (buf.eof()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected EOF while reading JSON object"); + + if (use_object_buffer) + object_buffer.push_back(*buf.position()); + ++buf.position(); + + continue; + } + + if (*buf.position() == '"') + quotes = !quotes; + else if (!quotes) // can be only opening_bracket or closing_bracket + balance += *buf.position() == '{' ? 1 : -1; + + ++buf.position(); + + if (balance == 0) + { + if (use_object_buffer) + return object_buffer; + return {start, buf.position()}; + } + + if (balance < 0) + break; + } + + throw Exception(ErrorCodes::INCORRECT_DATA, "JSON object should have equal number of opening and closing brackets"); +} + template -ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) +ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters) { static constexpr bool throw_exception = std::is_same_v; @@ -1318,6 +1400,9 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) } else { + if (!isSymbolIn(*buf.position(), allowed_delimiters)) + return error(); + ++buf.position(); if (!append_digit(month)) @@ -1325,7 +1410,11 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) append_digit(month); if (!buf.eof() && !isNumericASCII(*buf.position())) + { + if (!isSymbolIn(*buf.position(), allowed_delimiters)) + return error(); ++buf.position(); + } else return error(); @@ -1338,12 +1427,12 @@ ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf) return ReturnType(true); } -template void readDateTextFallback(LocalDate &, ReadBuffer &); -template bool readDateTextFallback(LocalDate &, ReadBuffer &); +template void readDateTextFallback(LocalDate &, ReadBuffer &, const char * allowed_delimiters); +template bool readDateTextFallback(LocalDate &, ReadBuffer &, const char * allowed_delimiters); template -ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut) +ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters, const char * allowed_time_delimiters) { static constexpr bool throw_exception = std::is_same_v; @@ -1400,10 +1489,8 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D size_t size = buf.read(s_pos, remaining_date_size); if (size != remaining_date_size) { - s_pos[size] = 0; - if constexpr (throw_exception) - throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot parse DateTime {}", s); + throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot parse DateTime {}", std::string_view(s, already_read_length + size)); else return false; } @@ -1413,6 +1500,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3]) || !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9])) return false; + + if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters)) + return false; } UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -1430,10 +1520,8 @@ 
ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (size != time_broken_down_length) { - s_pos[size] = 0; - if constexpr (throw_exception) - throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot parse time component of DateTime {}", s); + throw Exception(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot parse time component of DateTime {}", std::string_view(s, size)); else return false; } @@ -1443,6 +1531,9 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[3]) || !isNumericASCII(s[4]) || !isNumericASCII(s[6]) || !isNumericASCII(s[7])) return false; + + if (!isSymbolIn(s[2], allowed_time_delimiters) || !isSymbolIn(s[5], allowed_time_delimiters)) + return false; } hour = (s[0] - '0') * 10 + (s[1] - '0'); @@ -1488,10 +1579,10 @@ ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const D return ReturnType(true); } -template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); +template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); +template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &, const char *, const char *); template @@ -1905,6 +1996,11 @@ static ReturnType readParsedValueInto(Vector & s, ReadBuffer & buf, ParseFunc pa return ReturnType(true); } +void readParsedValueIntoString(String & s, ReadBuffer & buf, std::function parse_func) +{ + readParsedValueInto(s, buf, std::move(parse_func)); +} + template static ReturnType readQuotedStringFieldInto(Vector & s, ReadBuffer & buf) { diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index ffba4fafb5c..05198361ca2 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -258,6 +258,20 @@ inline void readBoolText(bool & x, ReadBuffer & buf) char tmp = '0'; readChar(tmp, buf); x = tmp != '0'; + + if (!buf.eof() && isAlphaASCII(tmp)) + { + if (tmp == 't' || tmp == 'T') + { + assertStringCaseInsensitive("rue", buf); + x = true; + } + else if (tmp == 'f' || tmp == 'F') + { + assertStringCaseInsensitive("alse", buf); + x = false; + } + } } template @@ -600,6 +614,7 @@ bool tryReadDoubleQuotedStringWithSQLStyle(String & s, ReadBuffer & buf); void readJSONString(String & s, ReadBuffer & buf, const FormatSettings::JSON & settings); void readBackQuotedString(String & s, ReadBuffer & buf); +bool tryReadBackQuotedString(String & s, ReadBuffer & buf); void readBackQuotedStringWithSQLStyle(String & s, ReadBuffer & buf); void readStringUntilEOF(String & s, ReadBuffer & buf); @@ -687,6 +702,10 @@ ReturnType readJSONObjectPossiblyInvalid(Vector & s, ReadBuffer & buf); template ReturnType readJSONArrayInto(Vector & s, ReadBuffer & buf); +/// Similar to readJSONObjectPossiblyInvalid but avoids copying the data if JSON object fits into current read buffer +/// If copying is unavoidable, it copies data into provided object_buffer and returns string_view to it. 
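As a standalone illustration of the scanning rule this helper uses (a bracket-balance counter, a quote toggle, and a skip after backslash escapes), here is a simplified in-memory sketch. It operates on a string_view and does not refill from a ReadBuffer, so it is not the actual implementation; only the scanning logic mirrors the code above.

#include <cassert>
#include <cstddef>
#include <string_view>

// Returns the length of the JSON object starting at data[0] (which must be '{'),
// or npos if the object never closes. Nothing beyond bracket balance is validated,
// mirroring the "possibly invalid" contract of the helper above.
std::size_t jsonObjectLength(std::string_view data)
{
    if (data.empty() || data.front() != '{')
        return std::string_view::npos;

    std::size_t balance = 0;
    bool in_quotes = false;

    for (std::size_t i = 0; i < data.size(); ++i)
    {
        char c = data[i];
        if (c == '\\')            // skip the escaped character, whatever it is
        {
            ++i;
            continue;
        }
        if (c == '"')
            in_quotes = !in_quotes;
        else if (!in_quotes && (c == '{' || c == '}'))
        {
            balance += (c == '{') ? 1 : -1;
            if (balance == 0)
                return i + 1;     // include the closing '}'
        }
    }
    return std::string_view::npos; // unbalanced: the caller reports INCORRECT_DATA
}

int main()
{
    assert(jsonObjectLength(R"({"a": "}", "b": {"c": 1}} tail)") == 25);
    assert(jsonObjectLength("{\"unterminated\": 1") == std::string_view::npos);
}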
+std::string_view readJSONObjectAsViewPossiblyInvalid(ReadBuffer & buf, String & object_buffer); + template void readStringUntilWhitespaceInto(Vector & s, ReadBuffer & buf); @@ -703,13 +722,28 @@ struct NullOutput }; template -ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf); +ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters); + +inline bool isSymbolIn(char symbol, const char * symbols) +{ + if (symbols == nullptr) + return true; + + const char * pos = symbols; + while (*pos) + { + if (*pos == symbol) + return true; + ++pos; + } + return false; +} /// In YYYY-MM-DD format. /// For convenience, Month and Day parts can have single digit instead of two digits. /// Any separators other than '-' are supported. template -inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) +inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -753,6 +787,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) } else { + if (!isSymbolIn(pos[-1], allowed_delimiters)) + return error(); + if (!isNumericASCII(pos[0])) return error(); @@ -768,6 +805,9 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) if (isNumericASCII(pos[-1]) || !isNumericASCII(pos[0])) return error(); + if (!isSymbolIn(pos[-1], allowed_delimiters)) + return error(); + day = pos[0] - '0'; if (isNumericASCII(pos[1])) { @@ -783,7 +823,7 @@ inline ReturnType readDateTextImpl(LocalDate & date, ReadBuffer & buf) return ReturnType(true); } else - return readDateTextFallback(date, buf); + return readDateTextFallback(date, buf, allowed_delimiters); } inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) @@ -797,15 +837,15 @@ inline void convertToDayNum(DayNum & date, ExtendedDayNum & from) } template -inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; LocalDate local_date; if constexpr (throw_exception) - readDateTextImpl(local_date, buf); - else if (!readDateTextImpl(local_date, buf)) + readDateTextImpl(local_date, buf, allowed_delimiters); + else if (!readDateTextImpl(local_date, buf, allowed_delimiters)) return false; ExtendedDayNum ret = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day()); @@ -814,15 +854,15 @@ inline ReturnType readDateTextImpl(DayNum & date, ReadBuffer & buf, const DateLU } template -inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; LocalDate local_date; if constexpr (throw_exception) - readDateTextImpl(local_date, buf); - else if (!readDateTextImpl(local_date, buf)) + readDateTextImpl(local_date, buf, allowed_delimiters); + else if (!readDateTextImpl(local_date, buf, allowed_delimiters)) return false; /// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01. 
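A compact sketch of how the allowed-delimiters check composes for the fixed-width YYYY-MM-DD case follows. Only isSymbolIn mirrors the helper introduced above (nullptr keeps the permissive default); the surrounding function name and the examples are illustrative, not the actual reader.

#include <cassert>
#include <cctype>
#include <cstring>
#include <initializer_list>
#include <string_view>

// nullptr means "no restriction", mirroring the semantics of isSymbolIn in ReadHelpers.h.
bool isSymbolIn(char symbol, const char * symbols)
{
    return symbols == nullptr || std::strchr(symbols, symbol) != nullptr;
}

// Validates a 10-character YYYY?MM?DD string, where '?' must come from allowed_delimiters.
bool looksLikeDate(std::string_view s, const char * allowed_delimiters)
{
    if (s.size() != 10)
        return false;
    for (std::size_t i : {0, 1, 2, 3, 5, 6, 8, 9})
        if (!std::isdigit(static_cast<unsigned char>(s[i])))
            return false;
    return isSymbolIn(s[4], allowed_delimiters) && isSymbolIn(s[7], allowed_delimiters);
}

int main()
{
    assert(looksLikeDate("2024-08-15", "-"));      // canonical form
    assert(looksLikeDate("2024/08/15", nullptr));  // no restriction: any delimiter accepted
    assert(!looksLikeDate("2024/08/15", "-"));     // strict formats can now reject '/'
}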
@@ -846,19 +886,19 @@ inline void readDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTI readDateTextImpl(date, buf, date_lut); } -inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf) +inline bool tryReadDateText(LocalDate & date, ReadBuffer & buf, const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf); + return readDateTextImpl(date, buf, allowed_delimiters); } -inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateText(DayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf, time_zone); + return readDateTextImpl(date, buf, time_zone, allowed_delimiters); } -inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_delimiters = nullptr) { - return readDateTextImpl(date, buf, time_zone); + return readDateTextImpl(date, buf, time_zone, allowed_delimiters); } UUID parseUUID(std::span src); @@ -975,13 +1015,13 @@ inline T parseFromString(std::string_view str) template -ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut); +ReturnType readDateTimeTextFallback(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr); /** In YYYY-MM-DD hh:mm:ss or YYYY-MM-DD format, according to specified time zone. * As an exception, also supported parsing of unix timestamp in form of decimal number. 
*/ template -inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -1014,6 +1054,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons if (!isNumericASCII(s[0]) || !isNumericASCII(s[1]) || !isNumericASCII(s[2]) || !isNumericASCII(s[3]) || !isNumericASCII(s[5]) || !isNumericASCII(s[6]) || !isNumericASCII(s[8]) || !isNumericASCII(s[9])) return ReturnType(false); + + if (!isSymbolIn(s[4], allowed_date_delimiters) || !isSymbolIn(s[7], allowed_date_delimiters)) + return ReturnType(false); } UInt16 year = (s[0] - '0') * 1000 + (s[1] - '0') * 100 + (s[2] - '0') * 10 + (s[3] - '0'); @@ -1033,6 +1076,9 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons if (!isNumericASCII(s[11]) || !isNumericASCII(s[12]) || !isNumericASCII(s[14]) || !isNumericASCII(s[15]) || !isNumericASCII(s[17]) || !isNumericASCII(s[18])) return ReturnType(false); + + if (!isSymbolIn(s[13], allowed_time_delimiters) || !isSymbolIn(s[16], allowed_time_delimiters)) + return ReturnType(false); } hour = (s[11] - '0') * 10 + (s[12] - '0'); @@ -1057,11 +1103,11 @@ inline ReturnType readDateTimeTextImpl(time_t & datetime, ReadBuffer & buf, cons return readIntTextImpl(datetime, buf); } else - return readDateTimeTextFallback(datetime, buf, date_lut); + return readDateTimeTextFallback(datetime, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } template -inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut) +inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut, const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { static constexpr bool throw_exception = std::is_same_v; @@ -1075,7 +1121,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re { try { - readDateTimeTextImpl(whole, buf, date_lut); + readDateTimeTextImpl(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } catch (const DB::Exception &) { @@ -1085,7 +1131,7 @@ inline ReturnType readDateTimeTextImpl(DateTime64 & datetime64, UInt32 scale, Re } else { - auto ok = readDateTimeTextImpl(whole, buf, date_lut); + auto ok = readDateTimeTextImpl(whole, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); if (!ok && (buf.eof() || *buf.position() != '.')) return ReturnType(false); } @@ -1168,14 +1214,14 @@ inline void readDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer readDateTimeTextImpl(datetime64, scale, buf, date_lut); } -inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance()) +inline bool tryReadDateTimeText(time_t & datetime, ReadBuffer & buf, const DateLUTImpl & time_zone = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { - return readDateTimeTextImpl(datetime, buf, time_zone); + return readDateTimeTextImpl(datetime, buf, time_zone, allowed_date_delimiters, allowed_time_delimiters); } -inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & 
date_lut = DateLUT::instance()) +inline bool tryReadDateTime64Text(DateTime64 & datetime64, UInt32 scale, ReadBuffer & buf, const DateLUTImpl & date_lut = DateLUT::instance(), const char * allowed_date_delimiters = nullptr, const char * allowed_time_delimiters = nullptr) { - return readDateTimeTextImpl(datetime64, scale, buf, date_lut); + return readDateTimeTextImpl(datetime64, scale, buf, date_lut, allowed_date_delimiters, allowed_time_delimiters); } inline void readDateTimeText(LocalDateTime & datetime, ReadBuffer & buf) @@ -1708,6 +1754,7 @@ inline T parse(const char * data, size_t size) T res; ReadBufferFromMemory buf(data, size); readText(res, buf); + assertEOF(buf); return res; } @@ -1715,7 +1762,9 @@ template inline bool tryParse(T & res, const char * data, size_t size) { ReadBufferFromMemory buf(data, size); - return tryReadText(res, buf); + if (!tryReadText(res, buf)) + return false; + return buf.eof(); } template @@ -1893,6 +1942,8 @@ struct PcgDeserializer } }; +void readParsedValueIntoString(String & s, ReadBuffer & buf, std::function parse_func); + template ReturnType readQuotedFieldInto(Vector & s, ReadBuffer & buf); diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index 4b2e6580f9b..a7bc0d4845c 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -238,7 +238,7 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP( if (iter == http_header_entries.end()) { - http_header_entries.emplace_back(user_agent, fmt::format("ClickHouse/{}", VERSION_STRING)); + http_header_entries.emplace_back(user_agent, fmt::format("ClickHouse/{}{}", VERSION_STRING, VERSION_OFFICIAL)); } if (!delay_initialization && use_external_buffer) diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index a966e370ca1..d4c41a3f2cd 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -46,7 +46,7 @@ namespace ProfileEvents namespace CurrentMetrics { - extern const Metric S3DiskNoKeyErrors; + extern const Metric DiskS3NoSuchKeyErrors; } namespace DB @@ -701,7 +701,7 @@ RequestResult Client::processRequestResult(RequestResult && outcome) const return std::forward(outcome); if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) - CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + CurrentMetrics::add(CurrentMetrics::DiskS3NoSuchKeyErrors); String enriched_message = fmt::format( "{} {}", @@ -982,10 +982,10 @@ PocoHTTPClientConfiguration ClientFactory::createClientConfiguration( // NOLINT { auto context = Context::getGlobalContextInstance(); chassert(context); - auto proxy_configuration_resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::protocolFromString(protocol), context->getConfigRef()); + auto proxy_configuration_resolver = ProxyConfigurationResolverProvider::get(ProxyConfiguration::protocolFromString(protocol), context->getConfigRef()); - auto per_request_configuration = [=] () { return proxy_configuration_resolver->resolve(); }; - auto error_report = [=] (const DB::ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); }; + auto per_request_configuration = [=]{ return proxy_configuration_resolver->resolve(); }; + auto error_report = [=](const ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); }; auto config = PocoHTTPClientConfiguration( per_request_configuration, diff --git a/src/IO/S3/Credentials.cpp b/src/IO/S3/Credentials.cpp index dfb7727fca4..d6f7542da6b 100644 --- a/src/IO/S3/Credentials.cpp +++ b/src/IO/S3/Credentials.cpp 
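Earlier in this hunk, parse() gained assertEOF() and tryParse() now also requires buf.eof(), so input with trailing characters ("123abc") is rejected instead of silently yielding 123. A standalone sketch of that contract using std::from_chars; it is illustrative only and not the ReadBuffer-based helpers themselves.

#include <cassert>
#include <charconv>
#include <optional>
#include <string_view>

// Parses an integer and requires the whole input to be consumed,
// mirroring the new assertEOF / buf.eof() checks in parse() / tryParse().
std::optional<int> tryParseStrict(std::string_view s)
{
    int value = 0;
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
    if (ec != std::errc() || ptr != s.data() + s.size())
        return std::nullopt;  // parse error or trailing characters
    return value;
}

int main()
{
    assert(tryParseStrict("123") == 123);
    assert(!tryParseStrict("123abc").has_value()); // previously this would have yielded 123
    assert(!tryParseStrict("").has_value());
}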
@@ -145,12 +145,16 @@ Aws::String AWSEC2MetadataClient::getDefaultCredentialsSecurely() const { String user_agent_string = awsComputeUserAgentString(); auto [new_token, response_code] = getEC2MetadataToken(user_agent_string); - if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST) + if (response_code == Aws::Http::HttpResponseCode::BAD_REQUEST + || response_code == Aws::Http::HttpResponseCode::REQUEST_NOT_MADE) + { + /// At least the host should be available and reply, otherwise neither IMDSv2 nor IMDSv1 are usable. return {}; + } else if (response_code != Aws::Http::HttpResponseCode::OK || new_token.empty()) { LOG_TRACE(logger, "Calling EC2MetadataService to get token failed, " - "falling back to less secure way. HTTP response code: {}", response_code); + "falling back to a less secure way. HTTP response code: {}", response_code); return getDefaultCredentials(); } @@ -247,7 +251,7 @@ static Aws::String getAWSMetadataEndpoint() return ec2_metadata_service_endpoint; } -std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) +std::shared_ptr createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration) { auto endpoint = getAWSMetadataEndpoint(); return std::make_shared(client_configuration, endpoint.c_str()); @@ -781,11 +785,13 @@ S3CredentialsProviderChain::S3CredentialsProviderChain( /// EC2MetadataService throttles by delaying the response so the service client should set a large read timeout. /// EC2MetadataService delay is in order of seconds so it only make sense to retry after a couple of seconds. - aws_client_configuration.connectTimeoutMs = 1000; + /// But the connection timeout should be small because there is the case when there is no IMDS at all, + /// like outside of the cloud, on your own machines. 
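The token handling earlier in this hunk distinguishes "no metadata service at all" (BAD_REQUEST or REQUEST_NOT_MADE, so neither IMDSv2 nor IMDSv1 can work) from ordinary failures that fall back to the token-less IMDSv1 flow. A condensed sketch of that decision; the numeric stand-ins for the SDK response codes and all names are assumptions, not the AWS SDK enum values.

#include <cassert>
#include <string>

enum class TokenResult { NoMetadataService, UseInsecureFallback, UseToken };

// Sketch: if the token request could not even be made (or was rejected outright),
// there is no usable IMDS at all; any other failure falls back to IMDSv1.
TokenResult interpretTokenResponse(int http_code, const std::string & token)
{
    constexpr int kBadRequest = 400;
    constexpr int kRequestNotMade = -1;   // illustrative stand-in for REQUEST_NOT_MADE
    constexpr int kOk = 200;

    if (http_code == kBadRequest || http_code == kRequestNotMade)
        return TokenResult::NoMetadataService;
    if (http_code != kOk || token.empty())
        return TokenResult::UseInsecureFallback;   // IMDSv1
    return TokenResult::UseToken;                  // IMDSv2
}

int main()
{
    assert(interpretTokenResponse(-1, "") == TokenResult::NoMetadataService);
    assert(interpretTokenResponse(500, "") == TokenResult::UseInsecureFallback);
    assert(interpretTokenResponse(200, "token") == TokenResult::UseToken);
}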
+ aws_client_configuration.connectTimeoutMs = 10; aws_client_configuration.requestTimeoutMs = 1000; aws_client_configuration.retryStrategy = std::make_shared(1, 1000); - auto ec2_metadata_client = InitEC2MetadataClient(aws_client_configuration); + auto ec2_metadata_client = createEC2MetadataClient(aws_client_configuration); auto config_loader = std::make_shared(ec2_metadata_client, !credentials_configuration.use_insecure_imds_request); AddProvider(std::make_shared(config_loader)); diff --git a/src/IO/S3/Credentials.h b/src/IO/S3/Credentials.h index 95297ab0538..042c48ec15a 100644 --- a/src/IO/S3/Credentials.h +++ b/src/IO/S3/Credentials.h @@ -70,7 +70,7 @@ private: LoggerPtr logger; }; -std::shared_ptr InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); +std::shared_ptr createEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration); class AWSEC2InstanceProfileConfigLoader : public Aws::Config::AWSProfileConfigLoader { diff --git a/src/IO/S3/PocoHTTPClient.cpp b/src/IO/S3/PocoHTTPClient.cpp index aab7a39534d..3e060e21c51 100644 --- a/src/IO/S3/PocoHTTPClient.cpp +++ b/src/IO/S3/PocoHTTPClient.cpp @@ -1,4 +1,5 @@ #include +#include #include "config.h" #if USE_AWS_S3 @@ -17,6 +18,7 @@ #include #include #include +#include #include #include @@ -29,6 +31,7 @@ #include + static const int SUCCESS_RESPONSE_MIN = 200; static const int SUCCESS_RESPONSE_MAX = 299; @@ -84,7 +87,7 @@ namespace DB::S3 { PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( - std::function per_request_configuration_, + std::function per_request_configuration_, const String & force_region_, const RemoteHostFilter & remote_host_filter_, unsigned int s3_max_redirects_, @@ -94,7 +97,7 @@ PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( bool s3_use_adaptive_timeouts_, const ThrottlerPtr & get_request_throttler_, const ThrottlerPtr & put_request_throttler_, - std::function error_report_) + std::function error_report_) : per_request_configuration(per_request_configuration_) , force_region(force_region_) , remote_host_filter(remote_host_filter_) @@ -107,6 +110,8 @@ PocoHTTPClientConfiguration::PocoHTTPClientConfiguration( , s3_use_adaptive_timeouts(s3_use_adaptive_timeouts_) , error_report(error_report_) { + /// This is used to identify configurations created by us. + userAgent = std::string(VERSION_FULL) + VERSION_OFFICIAL; } void PocoHTTPClientConfiguration::updateSchemeAndRegion() @@ -128,7 +133,7 @@ void PocoHTTPClientConfiguration::updateSchemeAndRegion() } else { - /// In global mode AWS C++ SDK send `us-east-1` but accept switching to another one if being suggested. + /// In global mode AWS C++ SDK sends `us-east-1` but accepts switching to another one if being suggested. region = Aws::Region::AWS_GLOBAL; } } @@ -166,6 +171,17 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & client_config { } +PocoHTTPClient::PocoHTTPClient(const Aws::Client::ClientConfiguration & client_configuration) + : timeouts(ConnectionTimeouts() + .withConnectionTimeout(Poco::Timespan(client_configuration.connectTimeoutMs * 1000)) + .withSendTimeout(Poco::Timespan(client_configuration.requestTimeoutMs * 1000)) + .withReceiveTimeout(Poco::Timespan(client_configuration.requestTimeoutMs * 1000)) + .withTCPKeepAliveTimeout(Poco::Timespan( + client_configuration.enableTcpKeepAlive ? 
client_configuration.tcpKeepAliveIntervalMs * 1000 : 0))), + remote_host_filter(Context::getGlobalContextInstance()->getRemoteHostFilter()) +{ +} + std::shared_ptr PocoHTTPClient::MakeRequest( const std::shared_ptr & request, Aws::Utils::RateLimits::RateLimiterInterface * readLimiter, @@ -381,8 +397,11 @@ void PocoHTTPClient::makeRequestInternalImpl( try { - const auto proxy_configuration = per_request_configuration(); - for (unsigned int attempt = 0; attempt <= s3_max_redirects; ++attempt) + ProxyConfiguration proxy_configuration; + if (per_request_configuration) + proxy_configuration = per_request_configuration(); + + for (size_t attempt = 0; attempt <= s3_max_redirects; ++attempt) { Poco::URI target_uri(uri); @@ -500,7 +519,6 @@ void PocoHTTPClient::makeRequestInternalImpl( LOG_TEST(log, "Redirecting request to new location: {}", location); addMetric(request, S3MetricType::Redirects); - continue; } @@ -548,9 +566,9 @@ void PocoHTTPClient::makeRequestInternalImpl( } else { - if (status_code == 429 || status_code == 503) - { // API throttling + { + /// API throttling addMetric(request, S3MetricType::Throttling); } else if (status_code >= 300) diff --git a/src/IO/S3/PocoHTTPClient.h b/src/IO/S3/PocoHTTPClient.h index 88251b964e2..eb65460ce13 100644 --- a/src/IO/S3/PocoHTTPClient.h +++ b/src/IO/S3/PocoHTTPClient.h @@ -20,6 +20,7 @@ #include #include + namespace Aws::Http::Standard { class StandardHttpResponse; @@ -27,18 +28,20 @@ class StandardHttpResponse; namespace DB { - class Context; } + namespace DB::S3 { + class ClientFactory; class PocoHTTPClient; + struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration { - std::function per_request_configuration; + std::function per_request_configuration; String force_region; const RemoteHostFilter & remote_host_filter; unsigned int s3_max_redirects; @@ -54,13 +57,13 @@ struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration size_t http_keep_alive_timeout = DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT; size_t http_keep_alive_max_requests = DEFAULT_HTTP_KEEP_ALIVE_MAX_REQUEST; - std::function error_report; + std::function error_report; void updateSchemeAndRegion(); private: PocoHTTPClientConfiguration( - std::function per_request_configuration_, + std::function per_request_configuration_, const String & force_region_, const RemoteHostFilter & remote_host_filter_, unsigned int s3_max_redirects_, @@ -70,13 +73,13 @@ private: bool s3_use_adaptive_timeouts_, const ThrottlerPtr & get_request_throttler_, const ThrottlerPtr & put_request_throttler_, - std::function error_report_ - ); + std::function error_report_); /// Constructor of Aws::Client::ClientConfiguration must be called after AWS SDK initialization. 
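The private constructor together with the friend declaration that follows is the usual restricted-factory idiom: only ClientFactory may build the configuration, which guarantees the AWS SDK has been initialized first. A generic sketch of the pattern with purely illustrative names:

#include <string>
#include <utility>

class ClientFactory;

class Configuration
{
public:
    std::string region;

private:
    Configuration() = default;   // not constructible by arbitrary code
    friend ClientFactory;        // the factory is the single allowed creator
};

class ClientFactory
{
public:
    static Configuration create(std::string region)
    {
        // ...perform the required one-time initialisation here, before touching Configuration...
        Configuration config;
        config.region = std::move(region);
        return config;
    }
};

int main()
{
    Configuration config = ClientFactory::create("us-east-1");
    (void)config;
}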
friend ClientFactory; }; + class PocoHTTPResponse : public Aws::Http::Standard::StandardHttpResponse { public: @@ -116,10 +119,12 @@ private: Aws::Utils::Stream::ResponseStream body_stream; }; + class PocoHTTPClient : public Aws::Http::HttpClient { public: explicit PocoHTTPClient(const PocoHTTPClientConfiguration & client_configuration); + explicit PocoHTTPClient(const Aws::Client::ClientConfiguration & client_configuration); ~PocoHTTPClient() override = default; std::shared_ptr MakeRequest( @@ -166,14 +171,14 @@ protected: static S3MetricKind getMetricKind(const Aws::Http::HttpRequest & request); void addMetric(const Aws::Http::HttpRequest & request, S3MetricType type, ProfileEvents::Count amount = 1) const; - std::function per_request_configuration; - std::function error_report; + std::function per_request_configuration; + std::function error_report; ConnectionTimeouts timeouts; const RemoteHostFilter & remote_host_filter; - unsigned int s3_max_redirects; + unsigned int s3_max_redirects = 0; bool s3_use_adaptive_timeouts = true; - bool enable_s3_requests_logging; - bool for_disk_s3; + bool enable_s3_requests_logging = false; + bool for_disk_s3 = false; /// Limits get request per second rate for GET, SELECT and all other requests, excluding throttled by put throttler /// (i.e. throttles GetObject, HeadObject) diff --git a/src/IO/S3/PocoHTTPClientFactory.cpp b/src/IO/S3/PocoHTTPClientFactory.cpp index ef7af2d01ba..abec907778c 100644 --- a/src/IO/S3/PocoHTTPClientFactory.cpp +++ b/src/IO/S3/PocoHTTPClientFactory.cpp @@ -15,7 +15,10 @@ namespace DB::S3 std::shared_ptr PocoHTTPClientFactory::CreateHttpClient(const Aws::Client::ClientConfiguration & client_configuration) const { - return std::make_shared(static_cast(client_configuration)); + if (client_configuration.userAgent.starts_with("ClickHouse")) + return std::make_shared(static_cast(client_configuration)); + else /// This client is created inside the AWS SDK with default settings to obtain ECS credentials from localhost. + return std::make_shared(client_configuration); } std::shared_ptr PocoHTTPClientFactory::CreateHttpRequest( diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index fead18315d8..9c80b377661 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -1,8 +1,8 @@ #include -#include -#include -#include "Common/Macros.h" + #if USE_AWS_S3 +#include +#include #include #include #include @@ -10,6 +10,7 @@ #include + namespace DB { @@ -40,21 +41,13 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Case when AWS Private Link Interface is being used /// E.g. (bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html - static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce.amazonaws.com(:\d{1,5})?)"); + static const RE2 aws_private_link_style_pattern(R"(bucket\.vpce\-([a-z0-9\-.]+)\.vpce\.amazonaws\.com(:\d{1,5})?)"); - /// Case when bucket name and key represented in path of S3 URL. + /// Case when bucket name and key represented in the path of S3 URL. /// E.g. 
(https://s3.region.amazonaws.com/bucket-name/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access static const RE2 path_style_pattern("^/([^/]*)/(.*)"); - static constexpr auto S3 = "S3"; - static constexpr auto S3EXPRESS = "S3EXPRESS"; - static constexpr auto COSN = "COSN"; - static constexpr auto COS = "COS"; - static constexpr auto OBS = "OBS"; - static constexpr auto OSS = "OSS"; - static constexpr auto EOS = "EOS"; - if (allow_archive_path_syntax) std::tie(uri_str, archive_pattern) = getURIAndArchivePattern(uri_); else @@ -85,7 +78,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) URIConverter::modifyURI(uri, mapper); } - storage_name = S3; + storage_name = "S3"; if (uri.getHost().empty()) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI."); @@ -93,11 +86,13 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Extract object version ID from query string. bool has_version_id = false; for (const auto & [query_key, query_value] : uri.getQueryParameters()) + { if (query_key == "versionId") { version_id = query_value; has_version_id = true; } + } /// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the http parameter, /// '?' can not be used as a wildcard, otherwise it will be ambiguous. @@ -129,15 +124,8 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) } boost::to_upper(name); - /// For S3Express it will look like s3express-eun1-az1, i.e. contain region and AZ info - if (name != S3 && !name.starts_with(S3EXPRESS) && name != COS && name != OBS && name != OSS && name != EOS) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Object storage system name is unrecognized in virtual hosted style S3 URI: {}", - quoteString(name)); - - if (name == COS) - storage_name = COSN; + if (name == "COS") + storage_name = "COSN"; else storage_name = name; } @@ -148,13 +136,22 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) validateBucket(bucket, uri); } else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI."); + { + /// Custom endpoint, e.g. a public domain of Cloudflare R2, + /// which could be served by a custom server-side code. + storage_name = "S3"; + bucket = "default"; + is_virtual_hosted_style = false; + endpoint = uri.getScheme() + "://" + uri.getAuthority(); + if (!uri.getPath().empty()) + key = uri.getPath().substr(1); + } } void URI::addRegionToURI(const std::string ®ion) { - if (auto pos = endpoint.find("amazonaws.com"); pos != std::string::npos) - endpoint = endpoint.substr(0, pos) + region + "." + endpoint.substr(pos); + if (auto pos = endpoint.find(".amazonaws.com"); pos != std::string::npos) + endpoint = endpoint.substr(0, pos) + "." + region + endpoint.substr(pos); } void URI::validateBucket(const String & bucket, const Poco::URI & uri) diff --git a/src/IO/S3/URI.h b/src/IO/S3/URI.h index 80e2da96cd4..c8d0b28cd15 100644 --- a/src/IO/S3/URI.h +++ b/src/IO/S3/URI.h @@ -1,14 +1,14 @@ #pragma once -#include -#include - #include "config.h" #if USE_AWS_S3 +#include +#include #include + namespace DB::S3 { @@ -23,7 +23,7 @@ namespace DB::S3 struct URI { Poco::URI uri; - // Custom endpoint if URI scheme is not S3. + // Custom endpoint if URI scheme, if not S3. 
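For the hosted case, the corrected addRegionToURI above anchors on ".amazonaws.com" and splices the region in front of it, while unrecognized hosts now fall through to the custom-endpoint branch (e.g. a Cloudflare R2 domain) instead of throwing. A small standalone sketch of that splice; the function name is illustrative.

#include <cassert>
#include <string>

// Sketch of the corrected region insertion: splice the region immediately before
// ".amazonaws.com", and leave any other (custom) endpoint untouched.
std::string addRegionToEndpoint(std::string endpoint, const std::string & region)
{
    if (auto pos = endpoint.find(".amazonaws.com"); pos != std::string::npos)
        endpoint = endpoint.substr(0, pos) + "." + region + endpoint.substr(pos);
    return endpoint;
}

int main()
{
    assert(addRegionToEndpoint("https://s3.amazonaws.com", "us-east-2")
           == "https://s3.us-east-2.amazonaws.com");
    // Custom endpoints are returned unchanged.
    assert(addRegionToEndpoint("https://example.r2.cloudflarestorage.com", "us-east-2")
           == "https://example.r2.cloudflarestorage.com");
}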
std::string endpoint; std::string bucket; std::string key; diff --git a/src/IO/WriteBuffer.h b/src/IO/WriteBuffer.h index 4759f96a235..84b1079b824 100644 --- a/src/IO/WriteBuffer.h +++ b/src/IO/WriteBuffer.h @@ -64,7 +64,8 @@ public: } bytes += bytes_in_buffer; - pos = working_buffer.begin(); + pos = working_buffer.begin() + nextimpl_working_buffer_offset; + nextimpl_working_buffer_offset = 0; } /// Calling finalize() in the destructor of derived classes is a bad practice. @@ -164,6 +165,11 @@ protected: bool finalized = false; bool canceled = false; + /// The number of bytes to preserve from the initial position of `working_buffer` + /// buffer. Apparently this is an additional out-parameter for nextImpl(), + /// not a real field. + size_t nextimpl_working_buffer_offset = 0; + private: /** Write the data in the buffer (from the beginning of the buffer to the current position). * Throw an exception if something is wrong. diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index 5ed4dbdc787..ffb38a384a0 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -183,6 +183,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_ , socket(socket_) , peer_address(socket.peerAddress()) , our_address(socket.address()) + , write_event(ProfileEvents::end()) , socket_description("socket (" + peer_address.toString() + ")") { } diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..9da46ee2d10 --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -0,0 +1,210 @@ +#include +#include +#include + + +namespace +{ + +template +void setValue(T * typed_ptr, std::type_identity_t val) +{ + memcpy(static_cast(typed_ptr), &val, sizeof(T)); +} + +} + +namespace DB +{ + +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : WriteBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size) + : WriteBufferFromPocoSocket( + socket_, write_event_, + std::clamp(buf_size, sizeof(*chunk_size_ptr) + 1, static_cast(std::numeric_limits>::max()))), + log(getLogger("Protocol")) +{} + +void WriteBufferFromPocoSocketChunked::enableChunked() +{ + chunked = true; + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + /// Pretend finishChunk() was just called to prevent sending empty chunk if finishChunk() called immediately + last_finish_chunk = chunk_size_ptr; +} + +void WriteBufferFromPocoSocketChunked::finishChunk() +{ + if (!chunked) + return; + + if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) + { + /// Prevent duplicate finish chunk (and finish chunk right after enableChunked()) + if (chunk_size_ptr == last_finish_chunk) + return; + + /// If current chunk is empty it means we are finishing a chunk previously sent by next(), + /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. + /// We don't need to worry about if it's the end of the buffer because next() always sends the whole buffer + /// so it should be a beginning of the buffer. 
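The framing maintained by finishChunk() is the one documented in ReadBufferFromPocoSocketChunked.h: a 4-byte little-endian size, the payload, and a zero size as the end-of-chunk marker, with the byte order matching the toLittleEndian/fromLittleEndian helpers from NetUtils.h. A minimal in-memory round-trip of that framing follows; it is a simplification over a byte vector, separate from the actual socket buffer classes.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Append one chunk: 4-byte little-endian size, then the payload.
void appendChunk(std::vector<std::uint8_t> & out, const std::string & payload)
{
    std::uint32_t size = static_cast<std::uint32_t>(payload.size());
    for (int i = 0; i < 4; ++i)
        out.push_back(static_cast<std::uint8_t>(size >> (8 * i)));  // little-endian
    out.insert(out.end(), payload.begin(), payload.end());
}

// A zero size terminates the sequence of chunks (the end-of-chunk marker).
void appendEndMarker(std::vector<std::uint8_t> & out)
{
    out.insert(out.end(), 4, 0);
}

// Decode chunks until the zero marker and concatenate their payloads.
std::string decodeChunks(const std::vector<std::uint8_t> & in)
{
    std::string result;
    std::size_t pos = 0;
    while (pos + 4 <= in.size())
    {
        std::uint32_t size = 0;
        for (int i = 0; i < 4; ++i)
            size |= static_cast<std::uint32_t>(in[pos + i]) << (8 * i);
        pos += 4;
        if (size == 0)
            break;                          // end-of-chunk marker
        if (pos + size > in.size())
            break;                          // malformed input: truncated chunk
        result.append(reinterpret_cast<const char *>(in.data() + pos), size);
        pos += size;
    }
    return result;
}

int main()
{
    std::vector<std::uint8_t> wire;
    appendChunk(wire, "Hello");   // a message may be split across several chunk parts
    appendChunk(wire, ", world");
    appendEndMarker(wire);
    assert(decodeChunks(wire) == "Hello, world");
}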
+ + chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); + + setValue(chunk_size_ptr, 0); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Previously finished chunk wasn't sent yet + if (last_finish_chunk == chunk_size_ptr) + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + } + + /// Fill up current chunk size + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + + if (!chunk_started) + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + else + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + + if (available() < sizeof(*chunk_size_ptr)) + { + finishing = available(); + pos += available(); + chunk_size_ptr = reinterpret_cast(pos); + last_finish_chunk = chunk_size_ptr; + return; + } + + /// Buffer end-of-chunk + setValue(reinterpret_cast(pos), 0); + pos += sizeof(*chunk_size_ptr); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; +} + +WriteBufferFromPocoSocketChunked::~WriteBufferFromPocoSocketChunked() +{ + try + { + finalize(); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + +void WriteBufferFromPocoSocketChunked::nextImpl() +{ + if (!chunked) + { + WriteBufferFromPocoSocket::nextImpl(); + return; + } + + /// next() after finishChunk at the end of the buffer + if (finishing < sizeof(*chunk_size_ptr)) + { + pos -= finishing; + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Send end-of-chunk directly + UInt32 s = 0; + socketSendBytes(reinterpret_cast(&s), sizeof(s)); + + finishing = sizeof(*chunk_size_ptr); + + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Prevent sending empty chunk + if (offset() == sizeof(*chunk_size_ptr)) + { + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + return; + } + + /// Finish chunk at the end of the buffer + if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) + { + pos = reinterpret_cast(chunk_size_ptr); + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = nullptr; + + return; + } + + bool initialize_last_finish_chunk = false; + if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk + { + pos -= sizeof(*chunk_size_ptr); + initialize_last_finish_chunk = true; + } + else // fill up current chunk size + { + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + if (!chunk_started) + { + chunk_started = true; + 
LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + } + else + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = initialize_last_finish_chunk ? chunk_size_ptr : nullptr; +} + +void WriteBufferFromPocoSocketChunked::finalizeImpl() +{ + if (chunked && offset() == sizeof(*chunk_size_ptr)) + pos -= sizeof(*chunk_size_ptr); + WriteBufferFromPocoSocket::finalizeImpl(); +} + +} diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..13a277e3bfb --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket +{ +public: + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + + void enableChunked(); + void finishChunk(); + ~WriteBufferFromPocoSocketChunked() override; + +protected: + void nextImpl() override; + void finalizeImpl() override; + Poco::Net::SocketAddress peerAddress() const { return peer_address; } + Poco::Net::SocketAddress ourAddress() const { return our_address; } + +private: + LoggerPtr log; + bool chunked = false; + UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk + bool chunk_started = false; // chunk started flag + UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer + size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker +}; + +} diff --git a/src/IO/parseDateTimeBestEffort.cpp b/src/IO/parseDateTimeBestEffort.cpp index e046e837689..52bcdc6bbb4 100644 --- a/src/IO/parseDateTimeBestEffort.cpp +++ b/src/IO/parseDateTimeBestEffort.cpp @@ -82,13 +82,14 @@ struct DateTimeSubsecondPart UInt8 digits; }; -template +template ReturnType parseDateTimeBestEffortImpl( time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, - DateTimeSubsecondPart * fractional) + DateTimeSubsecondPart * fractional, + const char * allowed_date_delimiters = nullptr) { auto on_error = [&](int error_code [[maybe_unused]], FormatStringHelper fmt_string [[maybe_unused]], @@ -170,22 +171,36 @@ ReturnType parseDateTimeBestEffortImpl( fractional->digits = 3; readDecimalNumber<3>(fractional->value, digits + 10); } + else if constexpr (strict) + { + /// Fractional part is not allowed. + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part"); + } return ReturnType(true); } else if (num_digits == 10 && !year && !has_time) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow timestamps"); + /// This is unix timestamp. 
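The WriteBufferFromPocoSocketChunked code above implements a simple framing for the connection: every part of a chunk is prefixed with a little-endian UInt32 byte count, and a zero-valued header marks the end of the logical chunk (this is what the setValue(chunk_size_ptr, 0) calls and the `finishing` bookkeeping maintain). Below is a minimal, self-contained sketch of that framing, not part of the patch; frameChunk is a hypothetical helper and a little-endian host is assumed.

#include <cstdint>
#include <string>
#include <string_view>

std::string frameChunk(std::string_view payload)
{
    std::string out;
    const uint32_t size = static_cast<uint32_t>(payload.size());   // part header: byte count, little-endian assumed
    out.append(reinterpret_cast<const char *>(&size), sizeof(size));
    out.append(payload);
    const uint32_t end_of_chunk = 0;                                // zero-valued header terminates the chunk
    out.append(reinterpret_cast<const char *>(&end_of_chunk), sizeof(end_of_chunk));
    return out;
}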
readDecimalNumber<10>(res, digits); return ReturnType(true); } else if (num_digits == 9 && !year && !has_time) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow timestamps"); + /// This is unix timestamp. readDecimalNumber<9>(res, digits); return ReturnType(true); } else if (num_digits == 14 && !year && !has_time) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); + /// This is YYYYMMDDhhmmss readDecimalNumber<4>(year, digits); readDecimalNumber<2>(month, digits + 4); @@ -197,6 +212,9 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 8 && !year) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); + /// This is YYYYMMDD readDecimalNumber<4>(year, digits); readDecimalNumber<2>(month, digits + 4); @@ -204,6 +222,9 @@ ReturnType parseDateTimeBestEffortImpl( } else if (num_digits == 6) { + if (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Strict best effort parsing doesn't allow date times without separators"); + /// This is YYYYMM or hhmmss if (!year && !month) { @@ -272,6 +293,9 @@ ReturnType parseDateTimeBestEffortImpl( else return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected number of decimal digits after year and month: {}", num_digits); } + + if (!isSymbolIn(delimiter_after_year, allowed_date_delimiters)) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: '{}' delimiter between date parts is not allowed", delimiter_after_year); } } else if (num_digits == 2 || num_digits == 1) @@ -329,7 +353,7 @@ ReturnType parseDateTimeBestEffortImpl( if (month && !day_of_month) day_of_month = hour_or_day_of_month_or_month; } - else if (checkChar('/', in) || checkChar('.', in) || checkChar('-', in)) + else if ((!in.eof() && isSymbolIn(*in.position(), allowed_date_delimiters)) && (checkChar('/', in) || checkChar('.', in) || checkChar('-', in))) { if (day_of_month) return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: day of month is duplicated"); @@ -378,7 +402,7 @@ ReturnType parseDateTimeBestEffortImpl( if (month > 12) std::swap(month, day_of_month); - if (checkChar('/', in) || checkChar('.', in) || checkChar('-', in)) + if ((!in.eof() && isSymbolIn(*in.position(), allowed_date_delimiters)) && (checkChar('/', in) || checkChar('.', in) || checkChar('-', in))) { if (year) return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year component is duplicated"); @@ -403,9 +427,16 @@ ReturnType parseDateTimeBestEffortImpl( else { if (day_of_month) + { + if (strict && hour) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: hour component is duplicated"); + hour = hour_or_day_of_month_or_month; + } else + { day_of_month = hour_or_day_of_month_or_month; + } } } else if (num_digits != 0) @@ -446,6 +477,11 @@ ReturnType parseDateTimeBestEffortImpl( fractional->digits = num_digits; readDecimalNumber(fractional->value, num_digits, digits); } + else if (strict) + { + /// Fractional part is not allowed. 
+ return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: unexpected fractional part"); + } } else if (c == '+' || c == '-') { @@ -582,12 +618,24 @@ ReturnType parseDateTimeBestEffortImpl( return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: neither Date nor Time was parsed successfully"); if (!day_of_month) + { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: day of month is required"); day_of_month = 1; + } + if (!month) + { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: month is required"); month = 1; + } if (!year) { + if constexpr (strict) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year is required"); + /// If year is not specified, it will be the current year if the date is unknown or not greater than today, /// otherwise it will be the previous year. /// This convoluted logic is needed to parse the syslog format, which looks as follows: "Mar 3 01:33:48". @@ -641,6 +689,20 @@ ReturnType parseDateTimeBestEffortImpl( } }; + if constexpr (strict) + { + if constexpr (is_64) + { + if (year < 1900) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime64: year {} is less than minimum supported year 1900", year); + } + else + { + if (year < 1970) + return on_error(ErrorCodes::CANNOT_PARSE_DATETIME, "Cannot read DateTime: year {} is less than minimum supported year 1970", year); + } + } + if (has_time_zone_offset) { res = utc_time_zone.makeDateTime(year, month, day_of_month, hour, minute, second); @@ -654,20 +716,20 @@ ReturnType parseDateTimeBestEffortImpl( return ReturnType(true); } -template -ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone) +template +ReturnType parseDateTime64BestEffortImpl(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters = nullptr) { time_t whole; DateTimeSubsecondPart subsecond = {0, 0}; // needs to be explicitly initialized sine it could be missing from input string if constexpr (std::is_same_v) { - if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond)) + if (!parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters)) return false; } else { - parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond); + parseDateTimeBestEffortImpl(whole, in, local_time_zone, utc_time_zone, &subsecond, allowed_date_delimiters); } @@ -730,4 +792,24 @@ bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone); } +bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters); +} + +bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTimeBestEffortImpl(res, in, local_time_zone, utc_time_zone, nullptr, allowed_date_delimiters); +} + +bool tryParseDateTime64BestEffortStrict(DateTime64 & res, 
UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters); +} + +bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters) +{ + return parseDateTime64BestEffortImpl(res, scale, in, local_time_zone, utc_time_zone, allowed_date_delimiters); +} + } diff --git a/src/IO/parseDateTimeBestEffort.h b/src/IO/parseDateTimeBestEffort.h index 22af44f9e76..6dd052b67a3 100644 --- a/src/IO/parseDateTimeBestEffort.h +++ b/src/IO/parseDateTimeBestEffort.h @@ -63,4 +63,12 @@ void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); void parseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); bool tryParseDateTime64BestEffortUS(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone); + +/// More strict version of best effort parsing. Requires day, month and year to be present, checks for allowed +/// delimiters between date components, makes additional correctness checks. Used in schema inference if date times. +bool tryParseDateTimeBestEffortStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTimeBestEffortUSStrict(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTime64BestEffortStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); +bool tryParseDateTime64BestEffortUSStrict(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone, const char * allowed_date_delimiters); + } diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index 0ec28f80072..c0bf7fcb28a 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -206,11 +206,6 @@ TEST(S3UriTest, validPatterns) } } -TEST_P(S3UriTest, invalidPatterns) -{ - ASSERT_ANY_THROW(S3::URI new_uri(GetParam())); -} - TEST(S3UriTest, versionIdChecks) { for (const auto& test_case : TestCases) @@ -223,19 +218,5 @@ TEST(S3UriTest, versionIdChecks) } } -INSTANTIATE_TEST_SUITE_P( - S3, - S3UriTest, - testing::Values( - "https:///", - "https://.s3.amazonaws.com/key", - "https://s3.amazonaws.com/key", - "https://jokserfn.s3amazonaws.com/key", - "https://s3.amazonaws.com//", - "https://amazonaws.com/", - "https://amazonaws.com//", - "https://amazonaws.com//key")); - } - #endif diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index 13c70b38543..60db406ca72 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -11,11 +11,15 @@ #include #include #include +#include +#include #include #include #include +#include #include +#include namespace fs = std::filesystem; @@ -88,6 +92,7 @@ 
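The strict variants declared above (tryParseDateTimeBestEffortStrict and friends) are intended for schema inference of date/time values: they require year, month and day to be present, reject bare Unix timestamps and compact forms such as YYYYMMDDhhmmss, reject unexpected fractional parts, and only accept the separators listed in allowed_date_delimiters. A hedged usage sketch follows, assuming the usual ClickHouse IO helpers (ReadBufferFromString, DateLUT); the scale and delimiter set are illustrative choices, not values taken from the patch.

#include <IO/ReadBufferFromString.h>
#include <IO/parseDateTimeBestEffort.h>
#include <Common/DateLUT.h>

bool looksLikeDateTime64(const std::string & value)
{
    DB::ReadBufferFromString in(value);
    DB::DateTime64 result;
    /// "-/" restricts date components to '-' or '/' separators; scale 6 is an arbitrary example.
    return DB::tryParseDateTime64BestEffortStrict(
        result, /*scale*/ 6, in, DateLUT::instance(), DateLUT::instance("UTC"), "-/");
}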
FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s , bypass_cache_threshold(settings.enable_bypass_cache_with_threshold ? settings.bypass_cache_threshold : 0) , boundary_alignment(settings.boundary_alignment) , load_metadata_threads(settings.load_metadata_threads) + , load_metadata_asynchronously(settings.load_metadata_asynchronously) , write_cache_per_user_directory(settings.write_cache_per_user_id_directory) , keep_current_size_to_max_ratio(1 - settings.keep_free_space_size_ratio) , keep_current_elements_to_max_ratio(1 - settings.keep_free_space_elements_ratio) @@ -136,7 +141,17 @@ const FileCache::UserInfo & FileCache::getInternalUser() bool FileCache::isInitialized() const { - return is_initialized.load(std::memory_order_seq_cst); + return is_initialized; +} + +void FileCache::throwInitExceptionIfNeeded() +{ + if (load_metadata_asynchronously) + return; + + std::lock_guard lock(init_mutex); + if (init_exception) + std::rethrow_exception(init_exception); } const String & FileCache::getBasePath() const @@ -170,6 +185,35 @@ void FileCache::assertInitialized() const } void FileCache::initialize() +{ + // Prevent initialize() from running twice. This may be caused by two cache disks being created with the same path (see integration/test_filesystem_cache). + callOnce(initialize_called, [&] { + bool need_to_load_metadata = fs::exists(getBasePath()); + try + { + if (!need_to_load_metadata) + fs::create_directories(getBasePath()); + status_file = make_unique(fs::path(getBasePath()) / "status", StatusFile::write_full_info); + } + catch (...) + { + init_exception = std::current_exception(); + tryLogCurrentException(__PRETTY_FUNCTION__); + throw; + } + + if (load_metadata_asynchronously) + { + load_metadata_main_thread = ThreadFromGlobalPool([this, need_to_load_metadata] { initializeImpl(need_to_load_metadata); }); + } + else + { + initializeImpl(need_to_load_metadata); + } + }); +} + +void FileCache::initializeImpl(bool load_metadata) { std::lock_guard lock(init_mutex); @@ -178,16 +222,10 @@ void FileCache::initialize() try { - if (fs::exists(getBasePath())) - { + if (load_metadata) loadMetadata(); - } - else - { - fs::create_directories(getBasePath()); - } - status_file = make_unique(fs::path(getBasePath()) / "status", StatusFile::write_full_info); + metadata.startup(); } catch (...) 
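FileCache::initialize() above is guarded by ClickHouse's OnceFlag / callOnce helpers so that two cache disks configured with the same path run initialization only once, and with load_metadata_asynchronously the metadata load is moved to a background thread while any exception from initialization is stored in init_exception for later rethrowing. A simplified sketch of the same pattern using only the standard library; the names are illustrative, not the patch's API.

#include <exception>
#include <mutex>
#include <thread>

struct OnceInitializedCache
{
    std::once_flag initialize_called;
    std::exception_ptr init_exception;
    std::thread load_metadata_thread;

    void initialize(bool asynchronously)
    {
        std::call_once(initialize_called, [&]
        {
            if (asynchronously)
                load_metadata_thread = std::thread([this] { initializeImpl(); });  // do not block the caller
            else
                initializeImpl();
        });
    }

    void initializeImpl()
    {
        try
        {
            /* load metadata, create directories, ... */
        }
        catch (...)
        {
            init_exception = std::current_exception();  // kept so a later caller can rethrow it
        }
    }

    ~OnceInitializedCache()
    {
        if (load_metadata_thread.joinable())
            load_metadata_thread.join();
    }
};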
{ @@ -196,8 +234,6 @@ void FileCache::initialize() throw; } - metadata.startup(); - if (keep_current_size_to_max_ratio != 1 || keep_current_elements_to_max_ratio != 1) { keep_up_free_space_ratio_task = Context::getGlobalContextInstance()->getSchedulePool().createTask(log->name(), [this] { freeSpaceRatioKeepingThreadFunc(); }); @@ -205,6 +241,7 @@ void FileCache::initialize() } is_initialized = true; + LOG_TEST(log, "Initialized cache from {}", metadata.getBaseDirectory()); } CachePriorityGuard::Lock FileCache::lockCache() const @@ -804,7 +841,8 @@ bool FileCache::tryReserve( const size_t size, FileCacheReserveStat & reserve_stat, const UserInfo & user, - size_t lock_wait_timeout_milliseconds) + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason) { ProfileEventTimeIncrement watch(ProfileEvents::FilesystemCacheReserveMicroseconds); @@ -817,6 +855,7 @@ bool FileCache::tryReserve( if (cache_is_being_resized.load(std::memory_order_relaxed)) { ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfCacheResize); + failure_reason = "cache is being resized"; return false; } @@ -824,6 +863,7 @@ bool FileCache::tryReserve( if (!cache_lock) { ProfileEvents::increment(ProfileEvents::FilesystemCacheFailToReserveSpaceBecauseOfLockContention); + failure_reason = "cache contention"; return false; } @@ -847,6 +887,7 @@ bool FileCache::tryReserve( LOG_TEST(log, "Query limit exceeded, space reservation failed, " "recache_on_query_limit_exceeded is disabled (while reserving for {}:{})", file_segment.key(), file_segment.offset()); + failure_reason = "query limit exceeded"; return false; } @@ -877,6 +918,7 @@ bool FileCache::tryReserve( if (!query_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock)) { + failure_reason = "cannot evict enough space for query limit"; return false; } @@ -891,11 +933,15 @@ bool FileCache::tryReserve( if (!main_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock)) { + failure_reason = "cannot evict enough space"; return false; } if (!file_segment.getKeyMetadata()->createBaseDirectory()) + { + failure_reason = "not enough space on device"; return false; + } if (eviction_candidates.size() > 0) { @@ -1188,7 +1234,6 @@ void FileCache::loadMetadataImpl() std::vector loading_threads; std::exception_ptr first_exception; std::mutex set_exception_mutex; - std::atomic stop_loading = false; LOG_INFO(log, "Loading filesystem cache with {} threads from {}", load_metadata_threads, metadata.getBaseDirectory()); @@ -1198,7 +1243,7 @@ void FileCache::loadMetadataImpl() { loading_threads.emplace_back([&] { - while (!stop_loading) + while (!stop_loading_metadata) { try { @@ -1215,7 +1260,7 @@ void FileCache::loadMetadataImpl() if (!first_exception) first_exception = std::current_exception(); } - stop_loading = true; + stop_loading_metadata = true; return; } } @@ -1228,7 +1273,7 @@ void FileCache::loadMetadataImpl() if (!first_exception) first_exception = std::current_exception(); } - stop_loading = true; + stop_loading_metadata = true; break; } } @@ -1415,6 +1460,11 @@ FileCache::~FileCache() void FileCache::deactivateBackgroundOperations() { shutdown.store(true); + + stop_loading_metadata = true; + if (load_metadata_main_thread.joinable()) + load_metadata_main_thread.join(); + metadata.shutdown(); if (keep_up_free_space_ratio_task) keep_up_free_space_ratio_task->deactivate(); diff 
--git a/src/Interpreters/Cache/FileCache.h b/src/Interpreters/Cache/FileCache.h index 07be802a940..8e8f01ff39e 100644 --- a/src/Interpreters/Cache/FileCache.h +++ b/src/Interpreters/Cache/FileCache.h @@ -8,6 +8,7 @@ #include +#include #include #include #include @@ -82,6 +83,9 @@ public: bool isInitialized() const; + /// Throws if `!load_metadata_asynchronously` and there is an exception in `init_exception` + void throwInitExceptionIfNeeded(); + const String & getBasePath() const; static Key createKeyForPath(const String & path); @@ -165,7 +169,8 @@ public: size_t size, FileCacheReserveStat & stat, const UserInfo & user, - size_t lock_wait_timeout_milliseconds); + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason); std::vector getFileSegmentInfos(const UserID & user_id); @@ -198,6 +203,9 @@ private: const size_t bypass_cache_threshold; const size_t boundary_alignment; size_t load_metadata_threads; + const bool load_metadata_asynchronously; + std::atomic stop_loading_metadata = false; + ThreadFromGlobalPool load_metadata_main_thread; const bool write_cache_per_user_directory; BackgroundSchedulePool::TaskHolder keep_up_free_space_ratio_task; @@ -209,6 +217,7 @@ private: std::exception_ptr init_exception; std::atomic is_initialized = false; + OnceFlag initialize_called; mutable std::mutex init_mutex; std::unique_ptr status_file; std::atomic shutdown = false; @@ -246,6 +255,8 @@ private: */ FileCacheQueryLimitPtr query_limit; + void initializeImpl(bool load_metadata); + void assertInitialized() const; void assertCacheCorrectness(); diff --git a/src/Interpreters/Cache/FileCacheSettings.cpp b/src/Interpreters/Cache/FileCacheSettings.cpp index c68ff3183c6..e162d6b7551 100644 --- a/src/Interpreters/Cache/FileCacheSettings.cpp +++ b/src/Interpreters/Cache/FileCacheSettings.cpp @@ -65,6 +65,9 @@ void FileCacheSettings::loadImpl(FuncHas has, FuncGetUInt get_uint, FuncGetStrin if (has("load_metadata_threads")) load_metadata_threads = get_uint("load_metadata_threads"); + if (has("load_metadata_asynchronously")) + load_metadata_asynchronously = get_uint("load_metadata_asynchronously"); + if (boundary_alignment > max_file_segment_size) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Setting `boundary_alignment` cannot exceed `max_file_segment_size`"); diff --git a/src/Interpreters/Cache/FileCacheSettings.h b/src/Interpreters/Cache/FileCacheSettings.h index 93ded202947..72a2b6c3369 100644 --- a/src/Interpreters/Cache/FileCacheSettings.h +++ b/src/Interpreters/Cache/FileCacheSettings.h @@ -32,6 +32,7 @@ struct FileCacheSettings size_t background_download_queue_size_limit = FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_QUEUE_SIZE_LIMIT; size_t load_metadata_threads = FILECACHE_DEFAULT_LOAD_METADATA_THREADS; + bool load_metadata_asynchronously = false; bool write_cache_per_user_id_directory = false; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index c46fb978ae4..cfbdfbaa257 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -502,7 +502,11 @@ LockedKeyPtr FileSegment::lockKeyMetadata(bool assert_exists) const return metadata->tryLock(); } -bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat) +bool FileSegment::reserve( + size_t size_to_reserve, + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason, + FileCacheReserveStat * reserve_stat) { if (!size_to_reserve) throw Exception(ErrorCodes::LOGICAL_ERROR, "Zero space 
reservation is not allowed"); @@ -554,7 +558,7 @@ bool FileSegment::reserve(size_t size_to_reserve, size_t lock_wait_timeout_milli if (!reserve_stat) reserve_stat = &dummy_stat; - bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds); + bool reserved = cache->tryReserve(*this, size_to_reserve, *reserve_stat, getKeyMetadata()->user, lock_wait_timeout_milliseconds, failure_reason); if (!reserved) setDownloadFailedUnlocked(lock()); diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 25ffb880b45..e90ebdbf8fe 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -201,7 +201,11 @@ public: /// Try to reserve exactly `size` bytes (in addition to the getDownloadedSize() bytes already downloaded). /// Returns true if reservation was successful, false otherwise. - bool reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat = nullptr); + bool reserve( + size_t size_to_reserve, + size_t lock_wait_timeout_milliseconds, + std::string & failure_reason, + FileCacheReserveStat * reserve_stat = nullptr); /// Write data into reserved space. void write(char * from, size_t size, size_t offset_in_file); diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 7e4b76d3cc6..6399691bcf6 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -705,7 +705,8 @@ void CacheMetadata::downloadImpl(FileSegment & file_segment, std::optionalavailable(); - if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds)) + std::string failure_reason; + if (!file_segment.reserve(size, reserve_space_lock_wait_timeout_milliseconds, failure_reason)) { LOG_TEST( log, "Failed to reserve space during background download " diff --git a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp index e6ebf6ad50c..e43bbacdbc5 100644 --- a/src/Interpreters/Cache/WriteBufferToFileSegment.cpp +++ b/src/Interpreters/Cache/WriteBufferToFileSegment.cpp @@ -75,7 +75,8 @@ void WriteBufferToFileSegment::nextImpl() FileCacheReserveStat reserve_stat; /// In case of an error, we don't need to finalize the file segment /// because it will be deleted soon and completed in the holder's destructor. - bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, &reserve_stat); + std::string failure_reason; + bool ok = file_segment->reserve(bytes_to_write, reserve_space_lock_wait_timeout_milliseconds, failure_reason, &reserve_stat); if (!ok) { @@ -84,9 +85,10 @@ void WriteBufferToFileSegment::nextImpl() reserve_stat_msg += fmt::format("{} hold {}, can release {}; ", toString(kind), ReadableSize(stat.non_releasable_size), ReadableSize(stat.releasable_size)); - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: {}(segment info: {})", + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Failed to reserve {} bytes for {}: reason {}, {}(segment info: {})", bytes_to_write, file_segment->getKind() == FileSegmentKind::Temporary ? 
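FileCache::tryReserve() and FileSegment::reserve() above now report the cause of a failed reservation through a std::string & failure_reason out-parameter ("cache is being resized", "cannot evict enough space", and so on), and WriteBufferToFileSegment propagates that reason into its NOT_ENOUGH_SPACE exception. The same pattern in miniature, self-contained and purely illustrative:

#include <cstddef>
#include <iostream>
#include <string>

bool tryReserveSpace(std::size_t bytes, std::size_t bytes_available, std::string & failure_reason)
{
    if (bytes > bytes_available)
    {
        failure_reason = "cannot evict enough space";   // filled on every failing path
        return false;
    }
    return true;
}

int main()
{
    std::string failure_reason;
    if (!tryReserveSpace(1 << 20, 1 << 10, failure_reason))
        std::cerr << "Failed to reserve space: " << failure_reason << '\n';
}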
"temporary file" : "the file in cache", + failure_reason, reserve_stat_msg, file_segment->getInfoForLog() ); diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 59c98491c14..7b7bedc850d 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -113,6 +113,9 @@ Cluster::Address::Address( secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; priority = Priority{config.getInt(config_prefix + ".priority", 1)}; + proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked"); + const char * port_type = secure == Protocol::Secure::Enable ? "tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); @@ -227,21 +230,37 @@ String Cluster::Address::toFullString(bool use_compact_format) const } } -Cluster::Address Cluster::Address::fromFullString(const String & full_string) +Cluster::Address Cluster::Address::fromFullString(std::string_view full_string) { - const char * address_begin = full_string.data(); - const char * address_end = address_begin + full_string.size(); - - const char * user_pw_end = strchr(full_string.data(), '@'); + std::string_view user_password; + if (auto pos = full_string.find('@'); pos != std::string_view::npos) + user_password = full_string.substr(pos + 1); /// parsing with the new shard{shard_index}[_replica{replica_index}] format - if (!user_pw_end && startsWith(full_string, "shard")) + if (user_password.empty() && full_string.starts_with("shard")) { - const char * underscore = strchr(full_string.data(), '_'); - Address address; - address.shard_index = parse(address_begin + strlen("shard")); - address.replica_index = underscore ? parse(underscore + strlen("_replica")) : 0; + + if (auto underscore_pos = full_string.find('_'); underscore_pos != std::string_view::npos) + { + address.shard_index = parse(full_string.substr(0, underscore_pos).substr(strlen("shard"))); + + if (full_string.substr(underscore_pos + 1).starts_with("replica")) + { + address.replica_index = parse(full_string.substr(underscore_pos + 1 + strlen("replica"))); + } + else if (full_string.substr(underscore_pos + 1).starts_with("all_replicas")) + { + address.replica_index = 0; + } + else + throw Exception(ErrorCodes::SYNTAX_ERROR, "Incorrect address '{}', should be in a form of `shardN_all_replicas` or `shardN_replicaM`", full_string); + } + else + { + address.shard_index = parse(full_string.substr(strlen("shard"))); + address.replica_index = 0; + } return address; } @@ -252,9 +271,13 @@ Cluster::Address Cluster::Address::fromFullString(const String & full_string) /// - credentials are exposed in file name; /// - the file name can be too long. 
+ const char * address_begin = full_string.data(); + const char * address_end = address_begin + full_string.size(); + const char * user_pw_end = strchr(address_begin, '@'); + Protocol::Secure secure = Protocol::Secure::Disable; const char * secure_tag = "+secure"; - if (endsWith(full_string, secure_tag)) + if (full_string.ends_with(secure_tag)) { address_end -= strlen(secure_tag); secure = Protocol::Secure::Enable; @@ -425,7 +448,9 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config, auto pool = ConnectionPoolFactory::instance().get( static_cast(settings.distributed_connections_pool_size), address.host_name, address.port, - address.default_database, address.user, address.password, address.quota_key, + address.default_database, address.user, address.password, + address.proto_send_chunked, address.proto_recv_chunked, + address.quota_key, address.cluster, address.cluster_secret, "server", address.compression, address.secure, address.priority); @@ -589,6 +614,8 @@ void Cluster::addShard( replica.default_database, replica.user, replica.password, + replica.proto_send_chunked, + replica.proto_recv_chunked, replica.quota_key, replica.cluster, replica.cluster_secret, @@ -744,6 +771,8 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index dc5790ac339..82d77941b76 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,6 +114,8 @@ public: UInt16 port{0}; String user; String password; + String proto_send_chunked = "notchunked"; + String proto_recv_chunked = "notchunked"; String quota_key; /// For inter-server authorization @@ -166,7 +168,7 @@ public: String toFullString(bool use_compact_format) const; /// Returns address with only shard index and replica index or full address without shard index and replica index - static Address fromFullString(const String & address_full_string); + static Address fromFullString(std::string_view full_string); /// Returns resolved address if it does resolve. std::optional getResolvedAddress() const; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 4a08fd5fe5b..3cc09370e86 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -2957,6 +2957,9 @@ ProgressCallback Context::getProgressCallback() const void Context::setProcessListElement(QueryStatusPtr elem) { + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have process list element"); + /// Set to a session or query. In the session, only one query is processed at a time. Therefore, the lock is not needed. 
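Cluster::Address::fromFullString() above now takes a std::string_view and recognizes the compact shardN, shardN_replicaM and shardN_all_replicas forms before falling back to the full user/password/host form. A small self-contained sketch of that first branch (error handling and the fallback branch are omitted; the types are stand-ins, not the patch's Address struct):

#include <charconv>
#include <optional>
#include <string_view>

struct ShardReplica { unsigned shard = 0; unsigned replica = 0; };

std::optional<ShardReplica> parseCompactAddress(std::string_view s)
{
    if (!s.starts_with("shard"))
        return std::nullopt;
    s.remove_prefix(5);

    auto parse_uint = [](std::string_view v, unsigned & out)
    {
        auto [ptr, ec] = std::from_chars(v.data(), v.data() + v.size(), out);
        return ec == std::errc() && ptr == v.data() + v.size();
    };

    ShardReplica result;
    auto underscore = s.find('_');
    if (underscore == std::string_view::npos)
        return parse_uint(s, result.shard) ? std::optional(result) : std::nullopt;

    if (!parse_uint(s.substr(0, underscore), result.shard))
        return std::nullopt;

    std::string_view tail = s.substr(underscore + 1);
    if (tail == "all_replicas")
        return result;                                                   // replica stays 0: "all replicas"
    if (tail.starts_with("replica") && parse_uint(tail.substr(7), result.replica))
        return result;
    return std::nullopt;
}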
process_list_elem = elem; has_process_list_elem = elem.get(); diff --git a/src/Interpreters/HashJoin/AddedColumns.cpp b/src/Interpreters/HashJoin/AddedColumns.cpp index 930a352744d..21cb6e401ed 100644 --- a/src/Interpreters/HashJoin/AddedColumns.cpp +++ b/src/Interpreters/HashJoin/AddedColumns.cpp @@ -15,48 +15,115 @@ JoinOnKeyColumns::JoinOnKeyColumns(const Block & block, const Names & key_names_ { } -template<> void AddedColumns::buildOutput() -{ -} +template<> +void AddedColumns::buildOutput() {} + +template<> +void AddedColumns::buildJoinGetOutput() {} + +template<> +template +void AddedColumns::buildOutputFromBlocks() {} template<> void AddedColumns::buildOutput() { - for (size_t i = 0; i < this->size(); ++i) + if (!output_by_row_list) + buildOutputFromBlocks(); + else { - auto& col = columns[i]; - size_t default_count = 0; - auto apply_default = [&]() + if (join_data_avg_perkey_rows < output_by_row_list_threshold) + buildOutputFromBlocks(); + else { - if (default_count > 0) + for (size_t i = 0; i < this->size(); ++i) { - JoinCommon::addDefaultValues(*col, type_name[i].type, default_count); - default_count = 0; - } - }; - - for (size_t j = 0; j < lazy_output.blocks.size(); ++j) - { - if (!lazy_output.blocks[j]) - { - default_count++; - continue; - } - apply_default(); - const auto & column_from_block = reinterpret_cast(lazy_output.blocks[j])->getByPosition(right_indexes[i]); - /// If it's joinGetOrNull, we need to wrap not-nullable columns in StorageJoin. - if (is_join_get) - { - if (auto * nullable_col = typeid_cast(col.get()); - nullable_col && !column_from_block.column->isNullable()) + auto & col = columns[i]; + for (auto row_ref_i : lazy_output.row_refs) { - nullable_col->insertFromNotNullable(*column_from_block.column, lazy_output.row_nums[j]); - continue; + if (row_ref_i) + { + const RowRefList * row_ref_list = reinterpret_cast(row_ref_i); + for (auto it = row_ref_list->begin(); it.ok(); ++it) + col->insertFrom(*it->block->getByPosition(right_indexes[i]).column, it->row_num); + } + else + type_name[i].type->insertDefaultInto(*col); } } - col->insertFrom(*column_from_block.column, lazy_output.row_nums[j]); } - apply_default(); + } +} + +template<> +void AddedColumns::buildJoinGetOutput() +{ + for (size_t i = 0; i < this->size(); ++i) + { + auto & col = columns[i]; + for (auto row_ref_i : lazy_output.row_refs) + { + if (!row_ref_i) + { + type_name[i].type->insertDefaultInto(*col); + continue; + } + const auto * row_ref = reinterpret_cast(row_ref_i); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[i]); + if (auto * nullable_col = typeid_cast(col.get()); nullable_col && !column_from_block.column->isNullable()) + nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num); + else + col->insertFrom(*column_from_block.column, row_ref->row_num); + } + } +} + +template<> +template +void AddedColumns::buildOutputFromBlocks() +{ + if (this->size() == 0) + return; + std::vector blocks; + std::vector row_nums; + blocks.reserve(lazy_output.row_refs.size()); + row_nums.reserve(lazy_output.row_refs.size()); + for (auto row_ref_i : lazy_output.row_refs) + { + if (row_ref_i) + { + if constexpr (from_row_list) + { + const RowRefList * row_ref_list = reinterpret_cast(row_ref_i); + for (auto it = row_ref_list->begin(); it.ok(); ++it) + { + blocks.emplace_back(it->block); + row_nums.emplace_back(it->row_num); + } + } + else + { + const RowRef * row_ref = reinterpret_cast(row_ref_i); + blocks.emplace_back(row_ref->block); + 
row_nums.emplace_back(row_ref->row_num); + } + } + else + { + blocks.emplace_back(nullptr); + row_nums.emplace_back(0); + } + } + for (size_t i = 0; i < this->size(); ++i) + { + auto & col = columns[i]; + for (size_t j = 0; j < blocks.size(); ++j) + { + if (blocks[j]) + col->insertFrom(*blocks[j]->getByPosition(right_indexes[i]).column, row_nums[j]); + else + type_name[i].type->insertDefaultInto(*col); + } } } @@ -72,29 +139,27 @@ void AddedColumns::applyLazyDefaults() } template<> -void AddedColumns::applyLazyDefaults() -{ -} +void AddedColumns::applyLazyDefaults() {} template <> -void AddedColumns::appendFromBlock(const Block & block, size_t row_num,const bool has_defaults) +void AddedColumns::appendFromBlock(const RowRef * row_ref, const bool has_defaults) { if (has_defaults) applyLazyDefaults(); #ifndef NDEBUG - checkBlock(block); + checkBlock(*row_ref->block); #endif if (is_join_get) { size_t right_indexes_size = right_indexes.size(); for (size_t j = 0; j < right_indexes_size; ++j) { - const auto & column_from_block = block.getByPosition(right_indexes[j]); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]); if (auto * nullable_col = nullable_column_ptrs[j]) - nullable_col->insertFromNotNullable(*column_from_block.column, row_num); + nullable_col->insertFromNotNullable(*column_from_block.column, row_ref->row_num); else - columns[j]->insertFrom(*column_from_block.column, row_num); + columns[j]->insertFrom(*column_from_block.column, row_ref->row_num); } } else @@ -102,22 +167,21 @@ void AddedColumns::appendFromBlock(const Block & block, size_t row_num,co size_t right_indexes_size = right_indexes.size(); for (size_t j = 0; j < right_indexes_size; ++j) { - const auto & column_from_block = block.getByPosition(right_indexes[j]); - columns[j]->insertFrom(*column_from_block.column, row_num); + const auto & column_from_block = row_ref->block->getByPosition(right_indexes[j]); + columns[j]->insertFrom(*column_from_block.column, row_ref->row_num); } } } template <> -void AddedColumns::appendFromBlock(const Block & block, size_t row_num, bool) +void AddedColumns::appendFromBlock(const RowRef * row_ref, bool) { #ifndef NDEBUG - checkBlock(block); + checkBlock(*row_ref->block); #endif if (has_columns_to_add) { - lazy_output.blocks.emplace_back(reinterpret_cast(&block)); - lazy_output.row_nums.emplace_back(static_cast(row_num)); + lazy_output.row_refs.emplace_back(reinterpret_cast(row_ref)); } } template<> @@ -131,8 +195,7 @@ void AddedColumns::appendDefaultRow() { if (has_columns_to_add) { - lazy_output.blocks.emplace_back(0); - lazy_output.row_nums.emplace_back(0); + lazy_output.row_refs.emplace_back(0); } } } diff --git a/src/Interpreters/HashJoin/AddedColumns.h b/src/Interpreters/HashJoin/AddedColumns.h index 13a7df6f498..f1b95a63be6 100644 --- a/src/Interpreters/HashJoin/AddedColumns.h +++ b/src/Interpreters/HashJoin/AddedColumns.h @@ -50,8 +50,7 @@ public: struct LazyOutput { - PaddedPODArray blocks; - PaddedPODArray row_nums; + PaddedPODArray row_refs; }; AddedColumns( @@ -76,8 +75,7 @@ public: if constexpr (lazy) { has_columns_to_add = num_columns_to_add > 0; - lazy_output.blocks.reserve(rows_to_add); - lazy_output.row_nums.reserve(rows_to_add); + lazy_output.row_refs.reserve(rows_to_add); } columns.reserve(num_columns_to_add); @@ -115,18 +113,22 @@ public: if (columns[j]->isNullable() && !saved_column->isNullable()) nullable_column_ptrs[j] = typeid_cast(columns[j].get()); } + join_data_avg_perkey_rows = join.getJoinedData()->avgPerKeyRows(); + 
output_by_row_list_threshold = join.getTableJoin().outputByRowListPerkeyRowsThreshold(); } size_t size() const { return columns.size(); } void buildOutput(); + void buildJoinGetOutput(); + ColumnWithTypeAndName moveColumn(size_t i) { return ColumnWithTypeAndName(std::move(columns[i]), type_name[i].type, type_name[i].qualified_name); } - void appendFromBlock(const Block & block, size_t row_num, bool has_default); + void appendFromBlock(const RowRef * row_ref, bool has_default); void appendDefaultRow(); @@ -134,6 +136,8 @@ public: const IColumn & leftAsofKey() const { return *left_asof_key; } + static constexpr bool isLazy() { return lazy; } + Block left_block; std::vector join_on_keys; ExpressionActionsPtr additional_filter_expression; @@ -142,6 +146,9 @@ public: size_t rows_to_add; std::unique_ptr offsets_to_replicate; bool need_filter = false; + bool output_by_row_list = false; + size_t join_data_avg_perkey_rows = 0; + size_t output_by_row_list_threshold = 0; IColumn::Filter filter; void reserve(bool need_replicate) @@ -212,15 +219,22 @@ private: columns.back()->reserve(src_column.column->size()); type_name.emplace_back(src_column.type, src_column.name, qualified_name); } + + /** Build output from the blocks that extract from `RowRef` or `RowRefList`, to avoid block cache miss which may cause performance slow down. + * And This problem would happen it we directly build output from `RowRef` or `RowRefList`. + */ + template + void buildOutputFromBlocks(); }; /// Adapter class to pass into addFoundRowAll /// In joinRightColumnsWithAdditionalFilter we don't want to add rows directly into AddedColumns, /// because they need to be filtered by additional_filter_expression. -class PreSelectedRows : public std::vector +class PreSelectedRows : public std::vector { public: - void appendFromBlock(const Block & block, size_t row_num, bool /* has_default */) { this->emplace_back(&block, row_num); } + void appendFromBlock(const RowRef * row_ref, bool /* has_default */) { this->emplace_back(row_ref); } + static constexpr bool isLazy() { return false; } }; } diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index dd7d42de63e..9c07a71e614 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -495,7 +495,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) } size_t rows = source_block.rows(); - + data->rows_to_join += rows; const auto & right_key_names = table_join->getAllNames(JoinTableSide::Right); ColumnPtrMap all_key_columns(right_key_names.size()); for (const auto & column_name : right_key_names) @@ -647,7 +647,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) total_bytes = getTotalByteCount(); } } - + data->keys_to_join = total_rows; shrinkStoredBlocksToFit(total_bytes); return table_join->sizeLimits().check(total_rows, total_bytes, "JOIN", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 00f5ef6d214..d645b8e9273 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -345,6 +345,18 @@ public: size_t blocks_allocated_size = 0; size_t blocks_nullmaps_allocated_size = 0; + + /// Number of rows of right table to join + size_t rows_to_join = 0; + /// Number of keys of right table to join + size_t keys_to_join = 0; + + size_t avgPerKeyRows() const + { + if (keys_to_join == 0) + return 0; + return rows_to_join / keys_to_join; + } }; 
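The reworked AddedColumns above keeps only row references while probing and materializes the output at the end: buildOutputFromBlocks() first flattens the RowRef / RowRefList entries into one linear (block, row number) list and then fills each output column with a sequential scan, which is what the comment about avoiding block cache misses refers to, while join_data_avg_perkey_rows and output_by_row_list_threshold decide whether that flattening is worth doing. A simplified two-pass illustration with stand-in types (not the real Block / IColumn API):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct Row { std::vector<int64_t> values; };                     // one row of the right table
struct Block { std::vector<Row> rows; };
struct Match { const Block * block = nullptr; std::vector<size_t> row_nums; };  // one key may match several rows

std::vector<std::vector<int64_t>> buildOutput(const std::vector<Match> & matches, size_t num_columns)
{
    /// Pass 1: flatten every matched row into one linear (block, row) list.
    std::vector<std::pair<const Block *, size_t>> flat;
    for (const auto & match : matches)
    {
        if (!match.block)
            flat.emplace_back(nullptr, 0);                       // no match: default value later
        else
            for (size_t row : match.row_nums)
                flat.emplace_back(match.block, row);
    }

    /// Pass 2: fill each output column with a sequential scan over the flat list,
    /// which is friendlier to caches than chasing row-ref lists once per column.
    std::vector<std::vector<int64_t>> columns(num_columns);
    for (size_t col = 0; col < num_columns; ++col)
    {
        columns[col].reserve(flat.size());
        for (const auto & [block, row] : flat)
            columns[col].push_back(block ? block->rows[row].values[col] : 0 /* default */);
    }
    return columns;
}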
using RightTableDataPtr = std::shared_ptr; diff --git a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 3b7a67467e3..97ad57d26ea 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -83,6 +83,7 @@ public: const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get = false); + private: template static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes); @@ -128,7 +129,7 @@ private: template static ColumnPtr buildAdditionalFilter( size_t left_start_row, - const std::vector & selected_rows, + const std::vector & selected_rows, const std::vector & row_replicate_offset, AddedColumns & added_columns); diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index aedd24630d1..320c8851ce4 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -95,7 +95,10 @@ Block HashJoinMethods::joinBlockImpl( added_columns.join_on_keys.clear(); Block remaining_block = sliceBlock(block, num_joined); - added_columns.buildOutput(); + if (is_join_get) + added_columns.buildJoinGetOutput(); + else + added_columns.buildOutput(); for (size_t i = 0; i < added_columns.size(); ++i) block.insert(added_columns.moveColumn(i)); @@ -339,6 +342,8 @@ size_t HashJoinMethods::joinRightColumns( size_t rows = added_columns.rows_to_add; if constexpr (need_filter) added_columns.filter = IColumn::Filter(rows, 0); + if constexpr (!flag_per_row && (STRICTNESS == JoinStrictness::All || (STRICTNESS == JoinStrictness::Semi && KIND == JoinKind::Right))) + added_columns.output_by_row_list = true; Arena pool; @@ -354,8 +359,8 @@ size_t HashJoinMethods::joinRightColumns( { if (unlikely(current_offset >= max_joined_block_rows)) { - added_columns.offsets_to_replicate->resize_assume_reserved(i); - added_columns.filter.resize_assume_reserved(i); + added_columns.offsets_to_replicate->resize(i); + added_columns.filter.resize(i); break; } } @@ -381,15 +386,15 @@ size_t HashJoinMethods::joinRightColumns( const IColumn & left_asof_key = added_columns.leftAsofKey(); auto row_ref = mapped->findAsof(left_asof_key, i); - if (row_ref.block) + if (row_ref && row_ref->block) { setUsed(added_columns.filter, i); if constexpr (flag_per_row) - used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); + used_flags.template setUsed(row_ref->block, row_ref->row_num, 0); else used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); + added_columns.appendFromBlock(row_ref, join_features.add_missing); } else addNotFoundRow(added_columns, current_offset); @@ -420,7 +425,7 @@ size_t HashJoinMethods::joinRightColumns( if (used_once) { setUsed(added_columns.filter, i); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + added_columns.appendFromBlock(&mapped, join_features.add_missing); } break; @@ -438,7 +443,7 @@ size_t HashJoinMethods::joinRightColumns( { setUsed(added_columns.filter, i); used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + added_columns.appendFromBlock(&mapped, join_features.add_missing); if (join_features.is_any_or_semi_join) { @@ -477,7 +482,7 @@ template template ColumnPtr HashJoinMethods::buildAdditionalFilter( size_t left_start_row, - const std::vector & 
selected_rows, + const std::vector & selected_rows, const std::vector & row_replicate_offset, AddedColumns & added_columns) { @@ -489,7 +494,7 @@ ColumnPtr HashJoinMethods::buildAdditionalFilter result_column = ColumnUInt8::create(); break; } - const Block & sample_right_block = *selected_rows.begin()->block; + const Block & sample_right_block = *((*selected_rows.begin())->block); if (!sample_right_block || !added_columns.additional_filter_expression) { auto filter = ColumnUInt8::create(); @@ -519,8 +524,8 @@ ColumnPtr HashJoinMethods::buildAdditionalFilter auto new_col = col.column->cloneEmpty(); for (const auto & selected_row : selected_rows) { - const auto & src_col = selected_row.block->getByPosition(right_col_pos); - new_col->insertFrom(*src_col.column, selected_row.row_num); + const auto & src_col = selected_row->block->getByPosition(right_col_pos); + new_col->insertFrom(*src_col.column, selected_row->row_num); } executed_block.insert({std::move(new_col), col.type, col.name}); } @@ -700,26 +705,24 @@ size_t HashJoinMethods::joinRightColumnsWithAddt { // For inner join, we need mark each right row'flag, because we only use each right row once. auto used_once = used_flags.template setUsedOnce( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + (*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); if (used_once) { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); } } } else { auto used_once = used_flags.template setUsedOnce( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + (*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); if (used_once) { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); } } } @@ -727,16 +730,14 @@ size_t HashJoinMethods::joinRightColumnsWithAddt { any_matched = true; if constexpr (join_features.right && join_features.need_flags) - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + used_flags.template setUsed((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); } else { any_matched = true; total_added_rows += 1; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); - used_flags.template setUsed( - selected_right_row_it->block, selected_right_row_it->row_num, 0); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); + used_flags.template setUsed((*selected_right_row_it)->block, (*selected_right_row_it)->row_num, 0); } } @@ -756,8 +757,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddt if (filter_flags[replicated_row]) { any_matched = true; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); total_added_rows += 1; } ++selected_right_row_it; @@ -767,8 +767,7 @@ size_t HashJoinMethods::joinRightColumnsWithAddt if (filter_flags[replicated_row]) { any_matched = true; - added_columns.appendFromBlock( - *selected_right_row_it->block, selected_right_row_it->row_num, 
join_features.add_missing); + added_columns.appendFromBlock(*selected_right_row_it, join_features.add_missing); total_added_rows += 1; selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; break; diff --git a/src/Interpreters/HashJoin/JoinFeatures.h b/src/Interpreters/HashJoin/JoinFeatures.h index b8de606c51e..b39593e7cac 100644 --- a/src/Interpreters/HashJoin/JoinFeatures.h +++ b/src/Interpreters/HashJoin/JoinFeatures.h @@ -18,11 +18,25 @@ struct JoinFeatures static constexpr bool inner = KIND == JoinKind::Inner; static constexpr bool full = KIND == JoinKind::Full; + /** Whether we may need duplicate rows from the left table. + * For example, when we have row (key1, attr1) in left table + * and rows (key1, attr2), (key1, attr3) in right table, + * then we need to duplicate row (key1, attr1) for each of joined rows from right table, so result will be + * (key1, attr1, key1, attr2) + * (key1, attr1, key1, attr3) + */ static constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); + + /// Whether we need to filter rows from the left table that do not have matches in the right table. static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); + + /// Whether we need to add default values for columns from the left table. static constexpr bool add_missing = (left || full) && !is_semi_join; + /// Whether we need to store flags for rows from the right table table + /// that indicates if they have matches in the left table. static constexpr bool need_flags = MapGetter, HashJoin::MapsAll>>::flagged; + static constexpr bool is_maps_all = std::is_same_v, HashJoin::MapsAll>; }; diff --git a/src/Interpreters/HashJoin/KnowRowsHolder.h b/src/Interpreters/HashJoin/KnowRowsHolder.h index d51c96893c5..9223e98d13c 100644 --- a/src/Interpreters/HashJoin/KnowRowsHolder.h +++ b/src/Interpreters/HashJoin/KnowRowsHolder.h @@ -104,7 +104,7 @@ void addFoundRowAll( { if (!known_rows.isKnown(std::make_pair(it->block, it->row_num))) { - added.appendFromBlock(*it->block, it->row_num, false); + added.appendFromBlock(*it, false); ++current_offset; if (!new_known_rows_ptr) { @@ -124,11 +124,16 @@ void addFoundRowAll( known_rows.add(std::cbegin(*new_known_rows_ptr), std::cend(*new_known_rows_ptr)); } } + else if constexpr (AddedColumns::isLazy()) + { + added.appendFromBlock(&mapped, false); + current_offset += mapped.rows; + } else { for (auto it = mapped.begin(); it.ok(); ++it) { - added.appendFromBlock(*it->block, it->row_num, false); + added.appendFromBlock(*it, false); ++current_offset; } } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 95143031707..0e981700ac4 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -121,6 +121,7 @@ namespace ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int TOO_MANY_TABLES; extern const int TOO_MANY_DATABASES; + extern const int THERE_IS_NO_COLUMN; } namespace fs = std::filesystem; @@ -847,6 +848,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti } properties.columns = ColumnsDescription(as_select_sample.getNamesAndTypesList()); + properties.columns_inferred_from_select_query = true; } else if (create.as_table_function) { @@ -936,6 +938,105 @@ void validateVirtualColumns(const IStorage & storage) } } +void InterpreterCreateQuery::validateMaterializedViewColumnsAndEngine(const 
ASTCreateQuery & create, const TableProperties & properties, const DatabasePtr & database) +{ + /// This is not strict validation, just catches common errors that would make the view not work. + /// It's possible to circumvent these checks by ALTERing the view or target table after creation; + /// we should probably do some of these checks on ALTER as well. + + NamesAndTypesList all_output_columns; + bool check_columns = false; + if (create.hasTargetTableID(ViewTarget::To)) + { + if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable( + create.getTargetTableID(ViewTarget::To), getContext())) + { + all_output_columns = to_table->getInMemoryMetadataPtr()->getSampleBlock().getNamesAndTypesList(); + check_columns = true; + } + } + else if (!properties.columns_inferred_from_select_query) + { + all_output_columns = properties.columns.getInsertable(); + check_columns = true; + } + + if (create.refresh_strategy && !create.refresh_strategy->append) + { + if (database && database->getEngineName() != "Atomic") + throw Exception(ErrorCodes::INCORRECT_QUERY, + "Refreshable materialized views (except with APPEND) only support Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName()); + } + + Block input_block; + + if (check_columns) + { + try + { + if (getContext()->getSettingsRef().allow_experimental_analyzer) + { + input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); + } + else + { + input_block = InterpreterSelectWithUnionQuery(create.select->clone(), + getContext(), + SelectQueryOptions().analyze()).getSampleBlock(); + } + } + catch (Exception &) + { + if (!getContext()->getSettingsRef().allow_materialized_view_with_bad_select) + throw; + check_columns = false; + } + } + + if (check_columns) + { + std::unordered_map output_types; + for (const NameAndTypePair & nt : all_output_columns) + output_types[nt.name] = nt.type; + + ColumnsWithTypeAndName input_columns; + ColumnsWithTypeAndName output_columns; + for (const auto & input_column : input_block) + { + auto it = output_types.find(input_column.name); + if (it != output_types.end()) + { + input_columns.push_back(input_column.cloneEmpty()); + output_columns.push_back(ColumnWithTypeAndName(it->second->createColumn(), it->second, input_column.name)); + } + else if (create.refresh_strategy) + { + /// Unrecognized columns produced by SELECT query are allowed by regular materialized + /// views, but not by refreshable ones. This is in part because it was easier to + /// implement, in part because refreshable views have less concern about ALTERing target + /// tables. + /// + /// The motivating scenario for allowing this in regular MV is ALTERing the table+query. + /// Suppose the user removes a column from target table, then a minute later + /// correspondingly updates the view's query to not produce that column. + /// If MV didn't allow unrecognized columns then during that minute all INSERTs into the + /// source table would fail - unacceptable. + /// For refreshable views, during that minute refreshes will fail - acceptable. + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "SELECT query outputs column with name '{}', which is not found in the target table. Use 'AS' to assign alias that matches a column name.", input_column.name); + } + } + + if (input_columns.empty()) + throw Exception(ErrorCodes::THERE_IS_NO_COLUMN, "None of the columns produced by the SELECT query are present in the target table. 
Use 'AS' to assign aliases that match column names."); + + ActionsDAG::makeConvertingActions( + input_columns, + output_columns, + ActionsDAG::MatchColumnsMode::Position + ); + } +} + namespace { void checkTemporaryTableEngineName(const String & name) @@ -1132,13 +1233,6 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data throw Exception(ErrorCodes::LOGICAL_ERROR, "Table UUID is not specified in DDL log"); } - if (create.refresh_strategy && database->getEngineName() != "Atomic") - throw Exception(ErrorCodes::INCORRECT_QUERY, - "Refreshable materialized view requires Atomic database engine, but database {} has engine {}", create.getDatabase(), database->getEngineName()); - /// TODO: Support Replicated databases, only with Shared/ReplicatedMergeTree. - /// Figure out how to make the refreshed data appear all at once on other - /// replicas; maybe a replicated SYSTEM SYNC REPLICA query before the rename? - if (database->getUUID() != UUIDHelpers::Nil) { if (create.attach && !from_path && create.uuid == UUIDHelpers::Nil) @@ -1359,51 +1453,16 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) /// Set and retrieve list of columns, indices and constraints. Set table engine if needed. Rewrite query in canonical way. TableProperties properties = getTablePropertiesAndNormalizeCreateQuery(create, mode); - /// Check type compatible for materialized dest table and select columns - if (create.is_materialized_view_with_external_target() && create.select && mode <= LoadingStrictnessLevel::CREATE) - { - if (StoragePtr to_table = DatabaseCatalog::instance().tryGetTable(create.getTargetTableID(ViewTarget::To), getContext())) - { - Block input_block; - - if (getContext()->getSettingsRef().allow_experimental_analyzer) - { - input_block = InterpreterSelectQueryAnalyzer::getSampleBlock(create.select->clone(), getContext()); - } - else - { - input_block = InterpreterSelectWithUnionQuery(create.select->clone(), - getContext(), - SelectQueryOptions().analyze()).getSampleBlock(); - } - - Block output_block = to_table->getInMemoryMetadataPtr()->getSampleBlock(); - - ColumnsWithTypeAndName input_columns; - ColumnsWithTypeAndName output_columns; - for (const auto & input_column : input_block) - { - if (const auto * output_column = output_block.findByName(input_column.name)) - { - input_columns.push_back(input_column.cloneEmpty()); - output_columns.push_back(output_column->cloneEmpty()); - } - } - - ActionsDAG::makeConvertingActions( - input_columns, - output_columns, - ActionsDAG::MatchColumnsMode::Position - ); - } - } - DatabasePtr database; bool need_add_to_database = !create.temporary; // In case of an ON CLUSTER query, the database may not be present on the initiator node if (need_add_to_database) database = DatabaseCatalog::instance().tryGetDatabase(database_name); + /// Check type compatible for materialized dest table and select columns + if (create.select && create.is_materialized_view && mode <= LoadingStrictnessLevel::CREATE) + validateMaterializedViewColumnsAndEngine(create, properties, database); + bool allow_heavy_populate = getContext()->getSettingsRef().database_replicated_allow_heavy_create && create.is_populate; if (!allow_heavy_populate && database && database->getEngineName() == "Replicated" && (create.select || create.is_populate)) { @@ -1944,6 +2003,8 @@ BlockIO InterpreterCreateQuery::execute() FunctionNameNormalizer::visit(query_ptr.get()); auto & create = query_ptr->as(); + create.if_not_exists |= 
getContext()->getSettingsRef().create_if_not_exists; + bool is_create_database = create.database && !create.table; if (!create.cluster.empty() && !maybeRemoveOnCluster(query_ptr, getContext())) { diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h index 3982ea2cabc..5047c372c71 100644 --- a/src/Interpreters/InterpreterCreateQuery.h +++ b/src/Interpreters/InterpreterCreateQuery.h @@ -90,6 +90,7 @@ private: IndicesDescription indices; ConstraintsDescription constraints; ProjectionsDescription projections; + bool columns_inferred_from_select_query = false; }; BlockIO createDatabase(ASTCreateQuery & create); @@ -98,6 +99,7 @@ private: /// Calculate list of columns, constraints, indices, etc... of table. Rewrite query in canonical way. TableProperties getTablePropertiesAndNormalizeCreateQuery(ASTCreateQuery & create, LoadingStrictnessLevel mode) const; void validateTableStructure(const ASTCreateQuery & create, const TableProperties & properties) const; + void validateMaterializedViewColumnsAndEngine(const ASTCreateQuery & create, const TableProperties & properties, const DatabasePtr & database); void setEngine(ASTCreateQuery & create) const; AccessRightsElements getRequiredAccess() const; diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 291c8e19db0..4827edc6c2a 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace DB @@ -27,7 +28,6 @@ namespace ErrorCodes extern const int TABLE_IS_READ_ONLY; extern const int SUPPORT_IS_DISABLED; extern const int BAD_ARGUMENTS; - extern const int NOT_IMPLEMENTED; extern const int QUERY_IS_PROHIBITED; } @@ -67,13 +67,42 @@ BlockIO InterpreterDeleteQuery::execute() auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); - auto lightweightDelete = [&]() + if (table->supportsDelete()) + { + /// Convert to MutationCommand + MutationCommands mutation_commands; + MutationCommand mut_command; + + mut_command.type = MutationCommand::Type::DELETE; + mut_command.predicate = delete_query.predicate; + + mutation_commands.emplace_back(mut_command); + + table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); + MutationsInterpreter::Settings settings(false); + MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); + table->mutate(mutation_commands, getContext()); + return {}; + } + else if (table->supportsLightweightDelete()) { if (!getContext()->getSettingsRef().enable_lightweight_delete) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Lightweight delete mutate is disabled. " "Set `enable_lightweight_delete` setting to enable it"); + if (metadata_snapshot->hasProjections()) + { + if (const auto * merge_tree_data = dynamic_cast(table.get())) + if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "DELETE query is not allowed for table {} because it has projections and setting " + "lightweight_mutation_projection_mode is set to THROW. " + "User should change lightweight_mutation_projection_mode OR " + "drop all the projections manually before running the query", + table_id.getFullTableName()); + } + /// Build "ALTER ...
UPDATE _row_exists = 0 WHERE predicate" query String alter_query = "ALTER TABLE " + table->getStorageID().getFullTableName() @@ -94,79 +123,9 @@ BlockIO InterpreterDeleteQuery::execute() context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); InterpreterAlterQuery alter_interpreter(alter_ast, context); return alter_interpreter.execute(); - }; - - if (table->supportsDelete()) - { - /// Convert to MutationCommand - MutationCommands mutation_commands; - MutationCommand mut_command; - - mut_command.type = MutationCommand::Type::DELETE; - mut_command.predicate = delete_query.predicate; - - mutation_commands.emplace_back(mut_command); - - table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); - MutationsInterpreter::Settings settings(false); - MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); - table->mutate(mutation_commands, getContext()); - return {}; - } - else if (table->supportsLightweightDelete()) - { - return lightweightDelete(); } else { - if (table->hasProjection()) - { - auto context = Context::createCopy(getContext()); - auto mode = context->getSettingsRef().lightweight_mutation_projection_mode; - if (mode == LightweightMutationProjectionMode::THROW) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DELETE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", - table->getStorageID().getFullTableName()); - } - else if (mode == LightweightMutationProjectionMode::DROP) - { - std::vector all_projections = metadata_snapshot->projections.getAllRegisteredNames(); - - context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); - - /// Drop projections first so that lightweight delete can be performed. - for (const auto & projection : all_projections) - { - String alter_query = - "ALTER TABLE " + table->getStorageID().getFullTableName() - + (delete_query.cluster.empty() ? 
"" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) - + " DROP PROJECTION IF EXISTS " + projection; - - ParserAlterQuery parser; - ASTPtr alter_ast = parseQuery( - parser, - alter_query.data(), - alter_query.data() + alter_query.size(), - "ALTER query", - 0, - DBMS_DEFAULT_MAX_PARSER_DEPTH, - DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); - - InterpreterAlterQuery alter_interpreter(alter_ast, context); - alter_interpreter.execute(); - } - } - else - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Unrecognized lightweight_mutation_projection_mode, only throw and drop are allowed."); - } - - return lightweightDelete(); - } - throw Exception(ErrorCodes::BAD_ARGUMENTS, "DELETE query is not supported for table {}", table->getStorageID().getFullTableName()); diff --git a/src/Interpreters/InterpreterDescribeCacheQuery.cpp b/src/Interpreters/InterpreterDescribeCacheQuery.cpp index c7e863bf260..c7464dc6b77 100644 --- a/src/Interpreters/InterpreterDescribeCacheQuery.cpp +++ b/src/Interpreters/InterpreterDescribeCacheQuery.cpp @@ -20,6 +20,7 @@ static Block getSampleBlock() ColumnWithTypeAndName{std::make_shared(), "max_size"}, ColumnWithTypeAndName{std::make_shared(), "max_elements"}, ColumnWithTypeAndName{std::make_shared(), "max_file_segment_size"}, + ColumnWithTypeAndName{std::make_shared(), "is_initialized"}, ColumnWithTypeAndName{std::make_shared(), "boundary_alignment"}, ColumnWithTypeAndName{std::make_shared>(), "cache_on_write_operations"}, ColumnWithTypeAndName{std::make_shared>(), "cache_hits_threshold"}, @@ -50,6 +51,7 @@ BlockIO InterpreterDescribeCacheQuery::execute() res_columns[i++]->insert(settings.max_size); res_columns[i++]->insert(settings.max_elements); res_columns[i++]->insert(settings.max_file_segment_size); + res_columns[i++]->insert(cache->isInitialized()); res_columns[i++]->insert(settings.boundary_alignment); res_columns[i++]->insert(settings.cache_on_write_operations); res_columns[i++]->insert(settings.cache_hits_threshold); diff --git a/src/Interpreters/InterpreterShowColumnsQuery.cpp b/src/Interpreters/InterpreterShowColumnsQuery.cpp index d8fff4e6026..472cdedf3ae 100644 --- a/src/Interpreters/InterpreterShowColumnsQuery.cpp +++ b/src/Interpreters/InterpreterShowColumnsQuery.cpp @@ -68,6 +68,7 @@ WITH map( 'Map', 'JSON', 'Tuple', 'JSON', 'Object', 'JSON', + 'JSON', 'JSON', 'String', '{}', 'FixedString', '{}') AS native_to_mysql_mapping, )", diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp index e5549b2e539..3de6b755609 100644 --- a/src/Interpreters/InterpreterShowCreateQuery.cpp +++ b/src/Interpreters/InterpreterShowCreateQuery.cpp @@ -97,7 +97,12 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl() } MutableColumnPtr column = ColumnString::create(); - column->insert(format({.ctx = getContext(), .query = *create_query, .one_line = false})); + column->insert(format( + { + .ctx = getContext(), + .query = *create_query, + .one_line = false + })); return QueryPipeline(std::make_shared(Block{{ std::move(column), diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index 1cd55a0020c..21c8b44b374 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -666,6 +666,10 @@ BlockIO InterpreterSystemQuery::execute() for (const auto & task : getRefreshTasks()) task->run(); break; + case Type::WAIT_VIEW: + for (const auto & task : getRefreshTasks()) + task->wait(); + break; case Type::CANCEL_VIEW: for (const auto & 
task : getRefreshTasks()) task->cancel(); @@ -1409,6 +1413,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster() break; } case Type::REFRESH_VIEW: + case Type::WAIT_VIEW: case Type::START_VIEW: case Type::START_VIEWS: case Type::STOP_VIEW: diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 24635870e62..0b93b5989b1 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -500,6 +500,12 @@ static void validateUpdateColumns( throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "There is no column {} in table", backQuote(column_name)); } } + else if (storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->hasDynamicSubcolumns()) + { + throw Exception(ErrorCodes::CANNOT_UPDATE_COLUMN, + "Cannot update column {} with type {}: updates of columns with dynamic subcolumns are not supported", + backQuote(column_name), storage_columns.getColumn(GetColumnsOptions::Ordinary, column_name).type->getName()); + } } } diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index 248ba947bc1..accb73e12df 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -244,9 +244,6 @@ public: /// Same as checkTimeLimit but it never throws [[nodiscard]] bool checkTimeLimitSoft(); - /// Use it in case of the query left in background to execute asynchronously - void updateContext(ContextWeakPtr weak_context) { context = std::move(weak_context); } - /// Get the reference for the start of the query. Used to synchronize with other Stopwatches UInt64 getQueryCPUStartTime() { return watch.getStart(); } }; diff --git a/src/Interpreters/RowRefs.cpp b/src/Interpreters/RowRefs.cpp index 9785ba46dab..1b397ab56ef 100644 --- a/src/Interpreters/RowRefs.cpp +++ b/src/Interpreters/RowRefs.cpp @@ -144,7 +144,7 @@ public: return low; } - RowRef findAsof(const IColumn & asof_column, size_t row_num) override + RowRef * findAsof(const IColumn & asof_column, size_t row_num) override { sort(); @@ -156,10 +156,10 @@ public: if (pos != entries.size()) { size_t row_ref_index = entries[pos].row_ref_index; - return row_refs[row_ref_index]; + return &row_refs[row_ref_index]; } - return {nullptr, 0}; + return nullptr; } private: diff --git a/src/Interpreters/RowRefs.h b/src/Interpreters/RowRefs.h index 650b2311ba7..7c98c47dd11 100644 --- a/src/Interpreters/RowRefs.h +++ b/src/Interpreters/RowRefs.h @@ -122,7 +122,7 @@ struct RowRefList : RowRef }; RowRefList() {} /// NOLINT - RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_) {} + RowRefList(const Block * block_, size_t row_num_) : RowRef(block_, row_num_), rows(1) {} ForwardIterator begin() const { return ForwardIterator(this); } @@ -135,8 +135,11 @@ struct RowRefList : RowRef *next = Batch(nullptr); } next = next->insert(std::move(row_ref), pool); + ++rows; } +public: + SizeT rows = 0; private: Batch * next = nullptr; }; @@ -158,7 +161,7 @@ struct SortedLookupVectorBase virtual void insert(const IColumn &, const Block *, size_t) = 0; // This needs to be synchronized internally - virtual RowRef findAsof(const IColumn &, size_t) = 0; + virtual RowRef * findAsof(const IColumn &, size_t) = 0; }; diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index f9afbc7b98d..ad55d16e284 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; -// 
TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index c8c926db13c..138085f0710 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -115,6 +115,7 @@ TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_, Temporary , partial_merge_join_left_table_buffer_bytes(settings.partial_merge_join_left_table_buffer_bytes) , max_files_to_merge(settings.join_on_disk_max_files_to_merge) , temporary_files_codec(settings.temporary_files_codec) + , output_by_rowlist_perkey_rows_threshold(settings.join_output_by_rowlist_perkey_rows_threshold) , max_memory_usage(settings.max_memory_usage) , tmp_volume(tmp_volume_) , tmp_data(tmp_data_) diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index 3f2bebb5816..4d626084d81 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -148,6 +148,7 @@ private: const size_t partial_merge_join_left_table_buffer_bytes = 0; const size_t max_files_to_merge = 0; const String temporary_files_codec = "LZ4"; + const size_t output_by_rowlist_perkey_rows_threshold = 0; /// Value if setting max_memory_usage for query, can be used when max_bytes_in_join is not specified. size_t max_memory_usage = 0; @@ -295,6 +296,7 @@ public: return join_use_nulls && isRightOrFull(kind()); } + size_t outputByRowListPerkeyRowsThreshold() const { return output_by_rowlist_perkey_rows_threshold; } size_t defaultMaxBytes() const { return default_max_bytes; } size_t maxJoinedBlockRows() const { return max_joined_block_rows; } size_t maxRowsInRightBlock() const { return partial_merge_join_rows_in_right_blocks; } diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 7f0fb8cd6ca..3259d7b67d6 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -65,7 +65,7 @@ TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, Cu std::unique_ptr TemporaryDataOnDisk::createRawStream(size_t max_file_size) { - if (file_cache) + if (file_cache && file_cache->isInitialized()) { auto holder = createCacheFile(max_file_size); return std::make_unique(std::move(holder)); @@ -81,7 +81,7 @@ std::unique_ptr TemporaryDataOnDisk::createRawStream(si TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size) { - if (file_cache) + if (file_cache && file_cache->isInitialized()) { auto holder = createCacheFile(max_file_size); diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 1f7c6b1fe68..ca8f8d235fa 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -473,11 +473,11 @@ void ThreadStatus::initGlobalProfiler([[maybe_unused]] UInt64 global_profiler_re { if (global_profiler_real_time_period > 0) query_profiler_real = std::make_unique(thread_id, - /* period= */ static_cast(global_profiler_real_time_period)); + /* period= */ global_profiler_real_time_period); if (global_profiler_cpu_time_period > 0) query_profiler_cpu = std::make_unique(thread_id, - /* period= */ static_cast(global_profiler_cpu_time_period)); + /* period= */ global_profiler_cpu_time_period); } catch (...) 
{ @@ -506,18 +506,18 @@ void ThreadStatus::initQueryProfiler() { if (!query_profiler_real) query_profiler_real = std::make_unique(thread_id, - /* period= */ static_cast(settings.query_profiler_real_time_period_ns)); + /* period= */ settings.query_profiler_real_time_period_ns); else - query_profiler_real->setPeriod(static_cast(settings.query_profiler_real_time_period_ns)); + query_profiler_real->setPeriod(settings.query_profiler_real_time_period_ns); } if (settings.query_profiler_cpu_time_period_ns > 0) { if (!query_profiler_cpu) query_profiler_cpu = std::make_unique(thread_id, - /* period= */ static_cast(settings.query_profiler_cpu_time_period_ns)); + /* period= */ settings.query_profiler_cpu_time_period_ns); else - query_profiler_cpu->setPeriod(static_cast(settings.query_profiler_cpu_time_period_ns)); + query_profiler_cpu->setPeriod(settings.query_profiler_cpu_time_period_ns); } } catch (...) diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 14094c3cccf..f31522ae649 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -47,10 +47,10 @@ #include #include -#include -#include -#include #include +#include +#include +#include #include #include @@ -1173,9 +1173,9 @@ bool TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select if (object_pos != std::string::npos) { String object_name = it->substr(0, object_pos); - if (pair.name == object_name && pair.type->getTypeId() == TypeIndex::Object) + if (pair.name == object_name && pair.type->getTypeId() == TypeIndex::ObjectDeprecated) { - const auto * object_type = typeid_cast(pair.type.get()); + const auto * object_type = typeid_cast(pair.type.get()); if (object_type->getSchemaFormat() == "json" && object_type->hasNullableSubcolumns()) { missed_subcolumns.insert(*it); diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 738c51baa64..7e1b4e2fb0e 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -463,7 +463,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID return src; } - else if (isObject(type)) + else if (isObjectDeprecated(type)) { if (src.getType() == Field::Types::Object) return src; /// Already in needed type. @@ -523,6 +523,13 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID /// We can insert any field to Dynamic column. return src; } + else if (isObject(type)) + { + if (src.getType() == Field::Types::Object) + return src; /// Already in needed type. + + /// TODO: add conversion from Map/Tuple to Object. + } /// Conversion from string by parsing. if (src.getType() == Field::Types::String) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index fe87eed5570..decc16a3704 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -786,7 +786,7 @@ static std::tuple executeQueryImpl( /// Verify that AST formatting is consistent: /// If you format AST, parse it back, and format it again, you get the same string. - String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true); + String formatted1 = ast->formatWithPossiblyHidingSensitiveData(0, true, true, false); /// The query can become more verbose after formatting, so: size_t new_max_query_size = max_query_size > 0 ? 
(1000 + 2 * max_query_size) : 0; @@ -811,7 +811,7 @@ static std::tuple executeQueryImpl( chassert(ast2); - String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true); + String formatted2 = ast2->formatWithPossiblyHidingSensitiveData(0, true, true, false); if (formatted1 != formatted2) throw Exception(ErrorCodes::LOGICAL_ERROR, diff --git a/src/Interpreters/formatWithPossiblyHidingSecrets.h b/src/Interpreters/formatWithPossiblyHidingSecrets.h index ea8c295b169..14e84f1d1a4 100644 --- a/src/Interpreters/formatWithPossiblyHidingSecrets.h +++ b/src/Interpreters/formatWithPossiblyHidingSecrets.h @@ -25,7 +25,8 @@ inline String format(const SecretHidingFormatSettings & settings) && settings.ctx->getSettingsRef().format_display_secrets_in_show_and_select && settings.ctx->getAccess()->isGranted(AccessType::displaySecretsInShowAndSelect); - return settings.query.formatWithPossiblyHidingSensitiveData(settings.max_length, settings.one_line, show_secrets); + return settings.query.formatWithPossiblyHidingSensitiveData( + settings.max_length, settings.one_line, show_secrets, settings.ctx->getSettingsRef().print_pretty_type_names); } } diff --git a/src/Interpreters/parseColumnsListForTableFunction.cpp b/src/Interpreters/parseColumnsListForTableFunction.cpp index b9fdaabede1..0c6d18dca70 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.cpp +++ b/src/Interpreters/parseColumnsListForTableFunction.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -30,6 +31,7 @@ DataTypeValidationSettings::DataTypeValidationSettings(const DB::Settings& setti , allow_suspicious_variant_types(settings.allow_suspicious_variant_types) , validate_nested_types(settings.validate_experimental_and_suspicious_types_inside_nested_types) , allow_experimental_dynamic_type(settings.allow_experimental_dynamic_type) + , allow_experimental_json_type(settings.allow_experimental_json_type) { } @@ -123,7 +125,7 @@ void validateDataType(const DataTypePtr & type_to_check, const DataTypeValidatio if (!settings.allow_experimental_dynamic_type) { - if (data_type.hasDynamicSubcolumns()) + if (isDynamic(data_type)) { throw Exception( ErrorCodes::ILLEGAL_COLUMN, @@ -132,6 +134,19 @@ void validateDataType(const DataTypePtr & type_to_check, const DataTypeValidatio data_type.getName()); } } + + if (!settings.allow_experimental_json_type) + { + const auto * object_type = typeid_cast(&data_type); + if (object_type && object_type->getSchemaFormat() == DataTypeObject::SchemaFormat::JSON) + { + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Cannot create column with type '{}' because experimental JSON type is not allowed. 
" + "Set setting allow_experimental_json_type = 1 in order to allow it", + data_type.getName()); + } + } }; validate_callback(*type_to_check); diff --git a/src/Interpreters/parseColumnsListForTableFunction.h b/src/Interpreters/parseColumnsListForTableFunction.h index 6f15c585e4f..6e00492c0ad 100644 --- a/src/Interpreters/parseColumnsListForTableFunction.h +++ b/src/Interpreters/parseColumnsListForTableFunction.h @@ -23,6 +23,7 @@ struct DataTypeValidationSettings bool allow_suspicious_variant_types = true; bool validate_nested_types = true; bool allow_experimental_dynamic_type = true; + bool allow_experimental_json_type = true; }; void validateDataType(const DataTypePtr & type, const DataTypeValidationSettings & settings); diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 36acc319f4e..bb3df734b2a 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,7 @@ #include #include +using namespace std::chrono_literals; namespace fs = std::filesystem; using namespace DB; @@ -246,7 +248,8 @@ void download(FileSegment & file_segment) ASSERT_EQ(file_segment.state(), State::DOWNLOADING); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000)); + std::string failure_reason; + ASSERT_TRUE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); download(cache_base_path, file_segment); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); @@ -258,7 +261,8 @@ void assertDownloadFails(FileSegment & file_segment) { ASSERT_EQ(file_segment.getOrSetDownloader(), FileSegment::getCallerId()); ASSERT_EQ(file_segment.getDownloadedSize(), 0); - ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000)); + std::string failure_reason; + ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); file_segment.complete(); } @@ -358,9 +362,11 @@ TEST_F(FileCacheTest, LRUPolicy) settings.max_size = 30; settings.max_elements = 5; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; const size_t file_size = INT_MAX; // the value doesn't really matter because boundary_alignment == 1. 
+ const auto user = FileCache::getCommonUser(); { std::cerr << "Step 1\n"; @@ -815,6 +821,7 @@ TEST_F(FileCacheTest, writeBuffer) settings.max_elements = 5; settings.max_file_segment_size = 5; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; FileCache cache("6", settings); cache.initialize(); @@ -946,6 +953,7 @@ TEST_F(FileCacheTest, temporaryData) settings.max_size = 10_KiB; settings.max_file_segment_size = 1_KiB; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; DB::FileCache file_cache("7", settings); file_cache.initialize(); @@ -957,10 +965,11 @@ TEST_F(FileCacheTest, temporaryData) { ASSERT_EQ(some_data_holder->size(), 5); + std::string failure_reason; for (auto & segment : *some_data_holder) { ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId()); - ASSERT_TRUE(segment->reserve(segment->range().size(), 1000)); + ASSERT_TRUE(segment->reserve(segment->range().size(), 1000, failure_reason)); download(*segment); segment->complete(); } @@ -1073,6 +1082,7 @@ TEST_F(FileCacheTest, CachedReadBuffer) settings.max_size = 30; settings.max_elements = 10; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; ReadSettings read_settings; read_settings.enable_filesystem_cache = true; @@ -1092,6 +1102,7 @@ TEST_F(FileCacheTest, CachedReadBuffer) auto cache = std::make_shared("8", settings); cache->initialize(); + auto key = cache->createKeyForPath(file_path); const auto user = FileCache::getCommonUser(); @@ -1132,6 +1143,7 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) settings.max_size = 10_KiB; settings.max_file_segment_size = 1_KiB; settings.base_path = cache_base_path; + settings.load_metadata_asynchronously = false; DB::FileCache file_cache("cache", settings); file_cache.initialize(); @@ -1195,6 +1207,7 @@ TEST_F(FileCacheTest, SLRUPolicy) settings.max_size = 40; settings.max_elements = 6; settings.boundary_alignment = 1; + settings.load_metadata_asynchronously = false; settings.cache_policy = "SLRU"; settings.slru_size_ratio = 0.5; @@ -1307,6 +1320,7 @@ TEST_F(FileCacheTest, SLRUPolicy) settings2.boundary_alignment = 1; settings2.cache_policy = "SLRU"; settings2.slru_size_ratio = 0.5; + settings.load_metadata_asynchronously = false; auto cache = std::make_shared("slru_2", settings2); cache->initialize(); diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index c96499095d5..23d653012f8 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -66,17 +66,13 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo { frame.need_parens = false; - /// We have to always backquote column names to avoid ambiguouty with INDEX and other declarations in CREATE query. + /// We have to always backquote column names to avoid ambiguity with INDEX and other declarations in CREATE query. 
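As a rough illustration of the ambiguity the comment above refers to (table, column, and index names are made up for the example, not taken from this change): without backquoting, a column that happens to be named `index` could be confused with an INDEX declaration when the formatted CREATE query is parsed back.

    CREATE TABLE t
    (
        `index` UInt64,
        INDEX idx_value `index` TYPE minmax GRANULARITY 1
    )
    ENGINE = MergeTree
    ORDER BY tuple();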
format_settings.ostr << backQuote(name); if (type) { format_settings.ostr << ' '; - - FormatStateStacked type_frame = frame; - type_frame.indent = 0; - - type->formatImpl(format_settings, state, type_frame); + type->formatImpl(format_settings, state, frame); } if (null_modifier) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 359e93ab269..d7f5b8f9702 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -256,6 +256,8 @@ ASTPtr ASTCreateQuery::clone() const res->set(res->dictionary, dictionary->clone()); } + if (refresh_strategy) + res->set(res->refresh_strategy, refresh_strategy->clone()); if (as_table_function) res->set(res->as_table_function, as_table_function->clone()); if (comment) diff --git a/src/Parsers/ASTDataType.cpp b/src/Parsers/ASTDataType.cpp index 3c17ae8c380..4211347fb74 100644 --- a/src/Parsers/ASTDataType.cpp +++ b/src/Parsers/ASTDataType.cpp @@ -40,12 +40,22 @@ void ASTDataType::formatImpl(const FormatSettings & settings, FormatState & stat { settings.ostr << '(' << (settings.hilite ? hilite_none : ""); - for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + if (!settings.one_line && settings.print_pretty_type_names && name == "Tuple") { - if (i != 0) - settings.ostr << ", "; - - arguments->children[i]->formatImpl(settings, state, frame); + ++frame.indent; + std::string indent_str = settings.one_line ? "" : "\n" + std::string(4 * frame.indent, ' '); + for (size_t i = 0, size = arguments->children.size(); i < size; ++i) + { + if (i != 0) + settings.ostr << ','; + settings.ostr << indent_str; + arguments->children[i]->formatImpl(settings, state, frame); + } + } + else + { + frame.expression_list_prepend_whitespace = false; + arguments->formatImpl(settings, state, frame); } settings.ostr << (settings.hilite ? hilite_function : "") << ')'; diff --git a/src/Parsers/ASTExpressionList.cpp b/src/Parsers/ASTExpressionList.cpp index 61ac482af82..f345b0c6a6f 100644 --- a/src/Parsers/ASTExpressionList.cpp +++ b/src/Parsers/ASTExpressionList.cpp @@ -42,7 +42,8 @@ void ASTExpressionList::formatImpl(const FormatSettings & settings, FormatState void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - std::string indent_str = "\n" + std::string(4 * (frame.indent + 1), ' '); + ++frame.indent; + std::string indent_str = "\n" + std::string(4 * frame.indent, ' '); if (frame.expression_list_prepend_whitespace) { @@ -50,8 +51,6 @@ void ASTExpressionList::formatImplMultiline(const FormatSettings & settings, For settings.ostr << ' '; } - ++frame.indent; - for (size_t i = 0, size = children.size(); i < size; ++i) { if (i && separator) diff --git a/src/Parsers/ASTNameTypePair.cpp b/src/Parsers/ASTNameTypePair.cpp index e4066081a9b..1515700365f 100644 --- a/src/Parsers/ASTNameTypePair.cpp +++ b/src/Parsers/ASTNameTypePair.cpp @@ -23,12 +23,8 @@ ASTPtr ASTNameTypePair::clone() const void ASTNameTypePair::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - std::string indent_str = settings.one_line ? 
"" : std::string(4 * frame.indent, ' '); - - settings.ostr << indent_str << backQuoteIfNeed(name) << ' '; + settings.ostr << backQuoteIfNeed(name) << ' '; type->formatImpl(settings, state, frame); } } - - diff --git a/src/Parsers/ASTObjectTypeArgument.cpp b/src/Parsers/ASTObjectTypeArgument.cpp new file mode 100644 index 00000000000..975f0389505 --- /dev/null +++ b/src/Parsers/ASTObjectTypeArgument.cpp @@ -0,0 +1,64 @@ +#include +#include +#include + + +namespace DB +{ + +ASTPtr ASTObjectTypeArgument::clone() const +{ + auto res = std::make_shared(*this); + res->children.clear(); + + if (path_with_type) + { + res->path_with_type = path_with_type->clone(); + res->children.push_back(res->path_with_type); + } + else if (skip_path) + { + res->skip_path = skip_path->clone(); + res->children.push_back(res->skip_path); + } + else if (skip_path_regexp) + { + res->skip_path_regexp = skip_path_regexp->clone(); + res->children.push_back(res->skip_path_regexp); + } + else if (parameter) + { + res->parameter = parameter->clone(); + res->children.push_back(res->parameter); + } + + return res; +} + +void ASTObjectTypeArgument::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + if (path_with_type) + { + path_with_type->formatImpl(settings, state, frame); + } + else if (parameter) + { + parameter->formatImpl(settings, state, frame); + } + else if (skip_path) + { + std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); + settings.ostr << indent_str << "SKIP" << ' '; + skip_path->formatImpl(settings, state, frame); + } + else if (skip_path_regexp) + { + std::string indent_str = settings.one_line ? "" : std::string(4 * frame.indent, ' '); + settings.ostr << indent_str << "SKIP REGEXP" << ' '; + skip_path_regexp->formatImpl(settings, state, frame); + } +} + +} + + diff --git a/src/Parsers/ASTObjectTypeArgument.h b/src/Parsers/ASTObjectTypeArgument.h new file mode 100644 index 00000000000..ab18d00d770 --- /dev/null +++ b/src/Parsers/ASTObjectTypeArgument.h @@ -0,0 +1,33 @@ +#pragma once + +#include + + +namespace DB +{ + +/** An argument of Object data type declaration (for example for JSON). Can contain one of: + * - pair (path, data type) + * - path that should be skipped + * - path regexp for paths that should be skipped + * - setting in a form of `setting=N` + */ +class ASTObjectTypeArgument : public IAST +{ +public: + ASTPtr path_with_type; + ASTPtr skip_path; + ASTPtr skip_path_regexp; + ASTPtr parameter; + + /** Get the text that identifies this element. */ + String getID(char) const override { return "ASTObjectTypeArgument"; } + ASTPtr clone() const override; + +protected: + void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; +}; + + +} + diff --git a/src/Parsers/ASTRefreshStrategy.cpp b/src/Parsers/ASTRefreshStrategy.cpp index 2e0c6ee4638..d10c1b4e7f5 100644 --- a/src/Parsers/ASTRefreshStrategy.cpp +++ b/src/Parsers/ASTRefreshStrategy.cpp @@ -20,7 +20,6 @@ ASTPtr ASTRefreshStrategy::clone() const res->set(res->settings, settings->clone()); if (dependencies) res->set(res->dependencies, dependencies->clone()); - res->schedule_kind = schedule_kind; return res; } @@ -66,6 +65,8 @@ void ASTRefreshStrategy::formatImpl( f_settings.ostr << (f_settings.hilite ? hilite_keyword : "") << " SETTINGS " << (f_settings.hilite ? hilite_none : ""); settings->formatImpl(f_settings, state, frame); } + if (append) + f_settings.ostr << (f_settings.hilite ? 
hilite_keyword : "") << " APPEND" << (f_settings.hilite ? hilite_none : ""); } } diff --git a/src/Parsers/ASTRefreshStrategy.h b/src/Parsers/ASTRefreshStrategy.h index ca248b76b40..bb5ac97c054 100644 --- a/src/Parsers/ASTRefreshStrategy.h +++ b/src/Parsers/ASTRefreshStrategy.h @@ -24,6 +24,7 @@ public: ASTTimeInterval * offset = nullptr; ASTTimeInterval * spread = nullptr; RefreshScheduleKind schedule_kind{RefreshScheduleKind::UNKNOWN}; + bool append = false; String getID(char) const override { return "Refresh strategy definition"; } diff --git a/src/Parsers/ASTRenameQuery.h b/src/Parsers/ASTRenameQuery.h index d51c382f374..39fc4f787ec 100644 --- a/src/Parsers/ASTRenameQuery.h +++ b/src/Parsers/ASTRenameQuery.h @@ -141,6 +141,19 @@ public: QueryKind getQueryKind() const override { return QueryKind::Rename; } + void addElement(const String & from_db, const String & from_table, const String & to_db, const String & to_table) + { + auto identifier = [&](const String & name) -> ASTPtr + { + if (name.empty()) + return nullptr; + ASTPtr ast = std::make_shared(name); + children.push_back(ast); + return ast; + }; + elements.push_back(Element {.from = Table {.database = identifier(from_db), .table = identifier(from_table)}, .to = Table {.database = identifier(to_db), .table = identifier(to_table)}}); + } + protected: void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override { diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index a730ea0ba3d..b5e5e0f208d 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -198,6 +198,29 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_database_table(); } + if (sync_replica_mode != SyncReplicaMode::DEFAULT) + { + settings.ostr << ' '; + print_keyword(magic_enum::enum_name(sync_replica_mode)); + + // If the mode is LIGHTWEIGHT and specific source replicas are specified + if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) + { + settings.ostr << ' '; + print_keyword("FROM"); + settings.ostr << ' '; + + bool first = true; + for (const auto & src : src_replicas) + { + if (!first) + settings.ostr << ", "; + first = false; + settings.ostr << quoteString(src); + } + } + } + if (query_settings) { settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "SETTINGS " << (settings.hilite ? 
hilite_none : ""); @@ -233,28 +256,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_identifier(disk); } - if (sync_replica_mode != SyncReplicaMode::DEFAULT) - { - settings.ostr << ' '; - print_keyword(magic_enum::enum_name(sync_replica_mode)); - - // If the mode is LIGHTWEIGHT and specific source replicas are specified - if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) - { - settings.ostr << ' '; - print_keyword("FROM"); - settings.ostr << ' '; - - bool first = true; - for (const auto & src : src_replicas) - { - if (!first) - settings.ostr << ", "; - first = false; - settings.ostr << quoteString(src); - } - } - } break; } case Type::SYNC_DATABASE_REPLICA: @@ -375,6 +376,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s case Type::START_VIEW: case Type::STOP_VIEW: case Type::CANCEL_VIEW: + case Type::WAIT_VIEW: { settings.ostr << ' '; print_database_table(); diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 167e724dcee..59de90b1d8e 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -95,6 +95,7 @@ public: START_CLEANUP, RESET_COVERAGE, REFRESH_VIEW, + WAIT_VIEW, START_VIEW, START_VIEWS, STOP_VIEW, diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index 34df4b135bb..cc2ee79cd36 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -371,6 +371,7 @@ namespace DB MR_MACROS(POPULATE, "POPULATE") \ MR_MACROS(PRECEDING, "PRECEDING") \ MR_MACROS(PRECISION, "PRECISION") \ + MR_MACROS(PREFIX, "PREFIX") \ MR_MACROS(PREWHERE, "PREWHERE") \ MR_MACROS(PRIMARY_KEY, "PRIMARY KEY") \ MR_MACROS(PRIMARY, "PRIMARY") \ @@ -449,6 +450,7 @@ namespace DB MR_MACROS(SHOW, "SHOW") \ MR_MACROS(SIGNED, "SIGNED") \ MR_MACROS(SIMPLE, "SIMPLE") \ + MR_MACROS(SKIP, "SKIP") \ MR_MACROS(SOURCE, "SOURCE") \ MR_MACROS(SPATIAL, "SPATIAL") \ MR_MACROS(SQL_SECURITY, "SQL SECURITY") \ @@ -641,6 +643,32 @@ protected: } }; +class ParserTokenSequence : public IParserBase +{ +private: + std::vector token_types; +public: + ParserTokenSequence(const std::vector & token_types_) : token_types(token_types_) {} /// NOLINT + +protected: + const char * getName() const override { return "token sequence"; } + + bool parseImpl(Pos & pos, ASTPtr & /*node*/, Expected & expected) override + { + for (auto token_type : token_types) + { + if (pos->type != token_type) + { + expected.add(pos, getTokenName(token_type)); + return false; + } + + ++pos; + } + + return true; + } +}; // Parser always returns true and do nothing. 
class ParserNothing : public IParserBase diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index de395d120d7..61b5723072e 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -282,22 +282,106 @@ bool ParserTableAsStringLiteralIdentifier::parseImpl(Pos & pos, ASTPtr & node, E return true; } +namespace +{ + +/// Parser of syntax sugar for reading JSON subcolumns of type Array(JSON): +/// json.a.b[][].c -> json.a.b.:Array(Array(JSON)).c +class ParserArrayOfJSONIdentifierAddition : public IParserBase +{ +public: + String getLastArrayOfJSONSubcolumnIdentifier() const + { + String subcolumn = ":`"; + for (size_t i = 0; i != last_array_level; ++i) + subcolumn += "Array("; + subcolumn += "JSON"; + for (size_t i = 0; i != last_array_level; ++i) + subcolumn += ")"; + return subcolumn + "`"; + } + +protected: + const char * getName() const override { return "ParserArrayOfJSONIdentifierDelimiter"; } + + bool parseImpl(Pos & pos, ASTPtr & /*node*/, Expected & expected) override + { + last_array_level = 0; + ParserTokenSequence brackets_parser(std::vector{TokenType::OpeningSquareBracket, TokenType::ClosingSquareBracket}); + if (!brackets_parser.check(pos, expected)) + return false; + ++last_array_level; + while (brackets_parser.check(pos, expected)) + ++last_array_level; + return true; + } + +private: + size_t last_array_level; +}; + +} bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ASTPtr id_list; - if (!ParserList(std::make_unique(allow_query_parameter, highlight_type), std::make_unique(TokenType::Dot), false) - .parse(pos, id_list, expected)) - return false; + auto element_parser = std::make_unique(allow_query_parameter, highlight_type); + std::vector> delimiter_parsers; + delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Colon}), SpecialDelimiter::JSON_PATH_DYNAMIC_TYPE); + delimiter_parsers.emplace_back(std::make_unique(std::vector{TokenType::Dot, TokenType::Caret}), SpecialDelimiter::JSON_PATH_PREFIX); + delimiter_parsers.emplace_back(std::make_unique(TokenType::Dot), SpecialDelimiter::NONE); + ParserArrayOfJSONIdentifierAddition array_of_json_identifier_addition; std::vector parts; + SpecialDelimiter last_special_delimiter = SpecialDelimiter::NONE; ASTs params; - const auto & list = id_list->as(); - for (const auto & child : list.children) + + bool is_first = true; + Pos begin = pos; + while (true) { - parts.emplace_back(getIdentifierName(child)); + ASTPtr element; + if (!element_parser->parse(pos, element, expected)) + { + if (is_first) + return false; + pos = begin; + break; + } + + if (last_special_delimiter != SpecialDelimiter::NONE) + { + parts.push_back(static_cast(last_special_delimiter) + backQuote(getIdentifierName(element))); + } + else + { + parts.push_back(getIdentifierName(element)); + /// Check if we have Array of JSON subcolumn addition after identifier + /// and replace it with the corresponding type subcolumn.
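In SQL terms, the sugar handled here turns trailing `[]` pairs into an explicit Array(...) type subcolumn; a minimal sketch with made-up column and path names:

    -- shorthand form
    SELECT json.a.b[][].c FROM t;
    -- is parsed the same as the explicit type subcolumn access
    SELECT json.a.b.:`Array(Array(JSON))`.c FROM t;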
+ if (!is_first && array_of_json_identifier_addition.check(pos, expected)) + parts.push_back(array_of_json_identifier_addition.getLastArrayOfJSONSubcolumnIdentifier()); + } + if (parts.back().empty()) - params.push_back(child->as()->getParam()); + params.push_back(element->as()->getParam()); + + is_first = false; + begin = pos; + bool parsed_delimiter = false; + for (const auto & [parser, special_delimiter] : delimiter_parsers) + { + if (parser->check(pos, expected)) + { + parsed_delimiter = true; + last_special_delimiter = special_delimiter; + break; + } + } + + if (!parsed_delimiter) + { + pos = begin; + break; + } } ParserKeyword s_uuid(Keyword::UUID); @@ -769,9 +853,10 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected /// Parse numbers (including decimals), strings, arrays and tuples of them. + Pos begin = pos; const char * data_begin = pos->begin; const char * data_end = pos->end; - bool is_string_literal = pos->type == StringLiteral; + ASTPtr string_literal; if (pos->type == Minus) { @@ -782,10 +867,15 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected data_end = pos->end; ++pos; } - else if (pos->type == Number || is_string_literal) + else if (pos->type == Number) { ++pos; } + else if (pos->type == StringLiteral) + { + if (!ParserStringLiteral().parse(begin, string_literal, expected)) + return false; + } else if (isOneOf(pos->type)) { TokenType last_token = OpeningSquareBracket; @@ -853,20 +943,18 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (ParserToken(DoubleColon).ignore(pos, expected) && ParserDataType().parse(pos, type_ast, expected)) { - String s; size_t data_size = data_end - data_begin; - if (is_string_literal) + if (string_literal) { - ReadBufferFromMemory buf(data_begin, data_size); - readQuotedStringWithSQLStyle(s, buf); - assert(buf.count() == data_size); + node = createFunctionCast(string_literal, type_ast); + return true; } else - s = String(data_begin, data_size); - - auto literal = std::make_shared(std::move(s)); - node = createFunctionCast(literal, type_ast); - return true; + { + auto literal = std::make_shared(String(data_begin, data_size)); + node = createFunctionCast(literal, type_ast); + return true; + } } return false; diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index 0209e785bff..903111f32db 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -52,11 +52,22 @@ protected: /** An identifier, possibly containing a dot, for example, x_yz123 or `something special` or Hits.EventTime, - * possibly with UUID clause like `db name`.`table name` UUID 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' + * possibly with UUID clause like `db name`.`table name` UUID 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'. + * There are also special delimiters `.:` and `.^` for JSON type subcolumns. In case of a special delimiter + * the next identifier part after it will include the special delimiter and will always be backquoted: json.a.b.:UInt32 -> ['json', 'a', 'b', ':`UInt32`']. + * It's needed to distinguish identifiers json.a.b.:UInt32 and json.a.b.`:UInt32`.
+ * There is also a special syntax sugar for reading JSON subcolumns of type Array(JSON): json.a.b[][].c -> json.a.b.:Array(Array(JSON)).c */ class ParserCompoundIdentifier : public IParserBase { public: + enum class SpecialDelimiter : char + { + NONE = '\0', + JSON_PATH_DYNAMIC_TYPE = ':', + JSON_PATH_PREFIX = '^', + }; + explicit ParserCompoundIdentifier(bool table_name_with_optional_uuid_ = false, bool allow_query_parameter_ = false, Highlight highlight_type_ = Highlight::identifier) : table_name_with_optional_uuid(table_name_with_optional_uuid_), allow_query_parameter(allow_query_parameter_), highlight_type(highlight_type_) { diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp index d38dc6d5f37..ad6b8e13ea6 100644 --- a/src/Parsers/ExpressionListParsers.cpp +++ b/src/Parsers/ExpressionListParsers.cpp @@ -2811,8 +2811,8 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po if (op.type == OperatorType::TupleElement) { ASTPtr tmp; - if (asterisk_parser.parse(pos, tmp, expected) || - columns_matcher_parser.parse(pos, tmp, expected)) + if (asterisk_parser.parse(pos, tmp, expected) + || columns_matcher_parser.parse(pos, tmp, expected)) { if (auto * asterisk = tmp->as()) { @@ -2833,6 +2833,17 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po layers.back()->pushOperand(std::move(tmp)); return Action::OPERATOR; } + + /// If it is an identifier, + /// replace it with literal, because an expression `expr().elem` + /// should be transformed to `tupleElement(expr(), 'elem')` for query analysis, + /// otherwise the identifier `elem` will not be found. + if (ParserIdentifier().parse(pos, tmp, expected)) + { + layers.back()->pushOperator(op); + layers.back()->pushOperand(std::make_shared(tmp->as()->name())); + return Action::OPERATOR; + } } /// isNull & isNotNull are postfix unary operators @@ -2863,7 +2874,7 @@ Action ParserExpressionImpl::tryParseOperator(Layers & layers, IParser::Pos & po layers.push_back(std::make_unique()); if (op.type == OperatorType::StartBetween || op.type == OperatorType::StartNotBetween) - layers.back()->between_counter++; + ++layers.back()->between_counter; return Action::OPERAND; } diff --git a/src/Parsers/IAST.cpp b/src/Parsers/IAST.cpp index 37d7f458d61..5bd2c92c60a 100644 --- a/src/Parsers/IAST.cpp +++ b/src/Parsers/IAST.cpp @@ -165,11 +165,12 @@ size_t IAST::checkDepthImpl(size_t max_depth) const return res; } -String IAST::formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const +String IAST::formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const { WriteBufferFromOwnString buf; FormatSettings settings(buf, one_line); settings.show_secrets = show_secrets; + settings.print_pretty_type_names = print_pretty_type_names; format(settings); return wipeSensitiveDataAndCutToLength(buf.str(), max_length); } diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index e2cf7579667..2293d50b0ec 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -201,6 +201,7 @@ public: bool show_secrets; /// Show secret parts of the AST (e.g. passwords, encryption keys). char nl_or_ws; /// Newline or whitespace. 
LiteralEscapingStyle literal_escaping_style; + bool print_pretty_type_names; explicit FormatSettings( WriteBuffer & ostr_, @@ -209,7 +210,8 @@ public: bool always_quote_identifiers_ = false, IdentifierQuotingStyle identifier_quoting_style_ = IdentifierQuotingStyle::Backticks, bool show_secrets_ = true, - LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular) + LiteralEscapingStyle literal_escaping_style_ = LiteralEscapingStyle::Regular, + bool print_pretty_type_names_ = false) : ostr(ostr_) , one_line(one_line_) , hilite(hilite_) @@ -218,6 +220,7 @@ public: , show_secrets(show_secrets_) , nl_or_ws(one_line ? ' ' : '\n') , literal_escaping_style(literal_escaping_style_) + , print_pretty_type_names(print_pretty_type_names_) { } @@ -230,6 +233,7 @@ public: , show_secrets(other.show_secrets) , nl_or_ws(other.nl_or_ws) , literal_escaping_style(other.literal_escaping_style) + , print_pretty_type_names(other.print_pretty_type_names) { } @@ -251,7 +255,7 @@ public: /// The state that is copied when each node is formatted. For example, nesting level. struct FormatStateStacked { - UInt8 indent = 0; + UInt16 indent = 0; bool need_parens = false; bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element. bool expression_list_prepend_whitespace = false; /// Prepend whitespace (if it is required) @@ -274,7 +278,7 @@ public: /// Secrets are displayed regarding show_secrets, then SensitiveDataMasker is applied. /// You can use Interpreters/formatWithPossiblyHidingSecrets.h for convenience. - String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets) const; + String formatWithPossiblyHidingSensitiveData(size_t max_length, bool one_line, bool show_secrets, bool print_pretty_type_names) const; /** formatForLogging and formatForErrorMessage always hide secrets. This inconsistent * behaviour is due to the fact such functions are called from Client which knows nothing about @@ -283,12 +287,12 @@ public: */ String formatForLogging(size_t max_length = 0) const { - return formatWithPossiblyHidingSensitiveData(max_length, true, false); + return formatWithPossiblyHidingSensitiveData(max_length, true, false, false); } String formatForErrorMessage() const { - return formatWithPossiblyHidingSensitiveData(0, true, false); + return formatWithPossiblyHidingSensitiveData(0, true, false, false); } virtual bool hasSecretParts() const { return childrenHaveSecretParts(); } diff --git a/src/Parsers/Lexer.cpp b/src/Parsers/Lexer.cpp index b4601389696..43c4ab867d1 100644 --- a/src/Parsers/Lexer.cpp +++ b/src/Parsers/Lexer.cpp @@ -423,6 +423,8 @@ Token Lexer::nextTokenImpl() } case '?': return Token(TokenType::QuestionMark, token_begin, ++pos); + case '^': + return Token(TokenType::Caret, token_begin, ++pos); case ':': { ++pos; diff --git a/src/Parsers/Lexer.h b/src/Parsers/Lexer.h index 6f31d56292d..9dc0850abfd 100644 --- a/src/Parsers/Lexer.h +++ b/src/Parsers/Lexer.h @@ -45,6 +45,7 @@ namespace DB M(Arrow) /** ->. Should be distinguished from minus operator. 
*/ \ M(QuestionMark) \ M(Colon) \ + M(Caret) \ M(DoubleColon) \ M(Equals) \ M(NotEquals) \ diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index cc4e02f46a3..31dc2075db4 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -53,40 +53,6 @@ ASTPtr parseComment(IParser::Pos & pos, Expected & expected) } - -bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) -{ - ParserToken open(TokenType::OpeningRoundBracket); - ParserToken close(TokenType::ClosingRoundBracket); - ParserIdentifier name_p; - ParserNameTypePairList columns_p; - - ASTPtr name; - ASTPtr columns; - - /// For now `name == 'Nested'`, probably alternative nested data structures will appear - if (!name_p.parse(pos, name, expected)) - return false; - - if (!open.ignore(pos, expected)) - return false; - - if (!columns_p.parse(pos, columns, expected)) - return false; - - if (!close.ignore(pos, expected)) - return false; - - auto func = std::make_shared(); - tryGetIdentifierNameInto(name, func->name); - - func->arguments = columns; - func->children.push_back(columns); - node = func; - - return true; -} - bool ParserSQLSecurity::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserToken s_eq(TokenType::Equals); diff --git a/src/Parsers/ParserCreateQuery.h b/src/Parsers/ParserCreateQuery.h index 53a62deb22b..82da2e7ea0b 100644 --- a/src/Parsers/ParserCreateQuery.h +++ b/src/Parsers/ParserCreateQuery.h @@ -18,15 +18,6 @@ namespace DB { -/** A nested table. For example, Nested(UInt32 CounterID, FixedString(2) UserAgentMajor) - */ -class ParserNestedTable : public IParserBase -{ -protected: - const char * getName() const override { return "nested table"; } - bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; -}; - /** Parses sql security option. DEFINER = user_name SQL SECURITY DEFINER */ class ParserSQLSecurity : public IParserBase diff --git a/src/Parsers/ParserDataType.cpp b/src/Parsers/ParserDataType.cpp index 2edb0141e12..d86b659df90 100644 --- a/src/Parsers/ParserDataType.cpp +++ b/src/Parsers/ParserDataType.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -16,8 +17,8 @@ namespace DB namespace { -/// Parser of Dynamic type arguments: Dynamic(max_types=N) -class DynamicArgumentsParser : public IParserBase +/// Parser of Dynamic type argument: Dynamic(max_types=N) +class DynamicArgumentParser : public IParserBase { private: const char * getName() const override { return "Dynamic data type optional argument"; } @@ -46,14 +47,84 @@ private: } }; +/// Parser of Object type argument. For example: JSON(some_parameter=N, some.path SomeType, SKIP skip.path, ...) 
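A hedged sketch of the declaration syntax this argument parser targets; the table name, the paths, and the `max_dynamic_paths` parameter name are illustrative assumptions, while the path-with-type, SKIP and SKIP REGEXP forms come from the comment below:

    CREATE TABLE t
    (
        `json` JSON(max_dynamic_paths = 16, a.b UInt32, SKIP a.e, SKIP REGEXP '^tmp')
    )
    ENGINE = MergeTree
    ORDER BY tuple();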
+class ObjectArgumentParser : public IParserBase +{ +private: + const char * getName() const override { return "JSON data type optional argument"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + auto argument = std::make_shared(); + + /// SKIP arguments + if (ParserKeyword(Keyword::SKIP).ignore(pos)) + { + /// SKIP REGEXP '' + if (ParserKeyword(Keyword::REGEXP).ignore(pos)) + { + ParserStringLiteral literal_parser; + ASTPtr literal; + if (!literal_parser.parse(pos, literal, expected)) + return false; + argument->skip_path_regexp = literal; + argument->children.push_back(argument->skip_path_regexp); + } + /// SKIP some.path + else + { + ParserCompoundIdentifier compound_identifier_parser; + ASTPtr compound_identifier; + if (!compound_identifier_parser.parse(pos, compound_identifier, expected)) + return false; + + argument->skip_path = compound_identifier; + argument->children.push_back(argument->skip_path); + } + + node = argument; + return true; + } + + ParserCompoundIdentifier compound_identifier_parser; + ASTPtr identifier; + if (!compound_identifier_parser.parse(pos, identifier, expected)) + return false; + + /// some_parameter=N + if (pos->type == TokenType::Equals) + { + ++pos; + ASTPtr number; + ParserNumber number_parser; + if (!number_parser.parse(pos, number, expected)) + return false; + + argument->parameter = makeASTFunction("equals", identifier, number); + argument->children.push_back(argument->parameter); + node = argument; + return true; + } + + ParserDataType type_parser; + ASTPtr type; + if (!type_parser.parse(pos, type, expected)) + return false; + + auto name_and_type = std::make_shared(); + name_and_type->name = getIdentifierName(identifier); + name_and_type->type = type; + name_and_type->children.push_back(name_and_type->type); + argument->path_with_type = name_and_type; + argument->children.push_back(argument->path_with_type); + node = argument; + return true; + } +}; + } bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { - ParserNestedTable nested; - if (nested.parse(pos, node, expected)) - return true; - String type_name; ParserIdentifier name_parser; @@ -171,10 +242,12 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) /// Allow mixed lists of nested and normal types. 
/// Parameters are either: - /// - Nested table elements; + /// - Nested table element; + /// - Tuple element /// - Enum element in form of 'a' = 1; /// - literal; - /// - Dynamic type arguments; + /// - Dynamic type argument; + /// - JSON type argument; /// - another data type (or identifier); size_t arg_num = 0; @@ -192,13 +265,24 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ASTPtr arg; if (type_name == "Dynamic") { - DynamicArgumentsParser parser; + DynamicArgumentParser parser; + parser.parse(pos, arg, expected); + } + else if (type_name == "JSON") + { + ObjectArgumentParser parser; parser.parse(pos, arg, expected); } else if (type_name == "Nested") { - ParserNestedTable nested_parser; - nested_parser.parse(pos, arg, expected); + ParserNameTypePair name_and_type_parser; + name_and_type_parser.parse(pos, arg, expected); + } + else if (type_name == "Tuple") + { + ParserNameTypePair name_and_type_parser; + ParserDataType only_type_parser; + name_and_type_parser.parse(pos, arg, expected) || only_type_parser.parse(pos, arg, expected); } else if (type_name == "AggregateFunction" || type_name == "SimpleAggregateFunction") { @@ -252,9 +336,6 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ++arg_num; } - if (pos->type == TokenType::Comma) - // ignore trailing comma inside Nested structures like Tuple(Int, Tuple(Int, String),) - ++pos; if (pos->type != TokenType::ClosingRoundBracket) return false; ++pos; diff --git a/src/Parsers/ParserRefreshStrategy.cpp b/src/Parsers/ParserRefreshStrategy.cpp index e7912293d85..4f3b7c66558 100644 --- a/src/Parsers/ParserRefreshStrategy.cpp +++ b/src/Parsers/ParserRefreshStrategy.cpp @@ -96,6 +96,10 @@ bool ParserRefreshStrategy::parseImpl(Pos & pos, ASTPtr & node, Expected & expec return false; refresh->set(refresh->settings, settings); } + + if (ParserKeyword{Keyword::APPEND}.ignore(pos, expected)) + refresh->append = true; + node = refresh; return true; } diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 81b64ab47c6..efabbbfa479 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -421,6 +421,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & break; case Type::REFRESH_VIEW: + case Type::WAIT_VIEW: case Type::START_VIEW: case Type::STOP_VIEW: case Type::CANCEL_VIEW: diff --git a/src/Processors/Executors/ExecutingGraph.cpp b/src/Processors/Executors/ExecutingGraph.cpp index 6d5b60d8159..f2927d4145c 100644 --- a/src/Processors/Executors/ExecutingGraph.cpp +++ b/src/Processors/Executors/ExecutingGraph.cpp @@ -96,7 +96,7 @@ bool ExecutingGraph::addEdges(uint64_t node) return was_edge_added; } -bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) +ExecutingGraph::UpdateNodeStatus ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) { auto & cur_node = *nodes[pid]; Processors new_processors; @@ -108,7 +108,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) catch (...) 
{ cur_node.exception = std::current_exception(); - return false; + return UpdateNodeStatus::Exception; } { @@ -118,7 +118,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) { for (auto & processor : new_processors) processor->cancel(); - return false; + return UpdateNodeStatus::Cancelled; } processors->insert(processors->end(), new_processors.begin(), new_processors.end()); @@ -178,7 +178,7 @@ bool ExecutingGraph::expandPipeline(std::stack & stack, uint64_t pid) } } - return true; + return UpdateNodeStatus::Done; } void ExecutingGraph::initializeExecution(Queue & queue) @@ -213,7 +213,7 @@ void ExecutingGraph::initializeExecution(Queue & queue) } -bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue) +ExecutingGraph::UpdateNodeStatus ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue) { std::stack updated_edges; std::stack updated_processors; @@ -279,7 +279,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue try { auto & processor = *node.processor; - IProcessor::Status last_status = node.last_processor_status; + const auto last_status = node.last_processor_status; IProcessor::Status status = processor.prepare(node.updated_input_ports, node.updated_output_ports); node.last_processor_status = status; @@ -309,7 +309,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue catch (...) { node.exception = std::current_exception(); - return false; + return UpdateNodeStatus::Exception; } #ifndef NDEBUG @@ -319,7 +319,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue node.updated_input_ports.clear(); node.updated_output_ports.clear(); - switch (node.last_processor_status) + switch (*node.last_processor_status) { case IProcessor::Status::NeedData: case IProcessor::Status::PortFull: @@ -386,8 +386,9 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue read_lock.unlock(); { std::unique_lock lock(nodes_mutex); - if (!expandPipeline(updated_processors, pid)) - return false; + auto status = expandPipeline(updated_processors, pid); + if (status != UpdateNodeStatus::Done) + return status; } read_lock.lock(); @@ -397,7 +398,7 @@ bool ExecutingGraph::updateNode(uint64_t pid, Queue & queue, Queue & async_queue } } - return true; + return UpdateNodeStatus::Done; } void ExecutingGraph::cancel(bool cancel_all_processors) diff --git a/src/Processors/Executors/ExecutingGraph.h b/src/Processors/Executors/ExecutingGraph.h index 71dcd360a2c..8d8ba722b3e 100644 --- a/src/Processors/Executors/ExecutingGraph.h +++ b/src/Processors/Executors/ExecutingGraph.h @@ -92,7 +92,7 @@ public: std::exception_ptr exception; /// Last state for profiling. - IProcessor::Status last_processor_status = IProcessor::Status::NeedData; + std::optional last_processor_status; /// Ports which have changed their state since last processor->prepare() call. /// They changed when neighbour processors interact with connected ports. @@ -138,10 +138,17 @@ public: /// Traverse graph the first time to update all the childless nodes. void initializeExecution(Queue & queue); + enum class UpdateNodeStatus + { + Done, + Exception, + Cancelled, + }; + /// Update processor with pid number (call IProcessor::prepare). /// Check parents and children of current processor and push them to stacks if they also need to be updated. /// If processor wants to be expanded, lock will be upgraded to get write access to pipeline. 
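The signature change right below replaces the bool result of updateNode/expandPipeline with the three-valued UpdateNodeStatus declared above, so callers can tell an exception apart from a cooperative cancellation. A minimal standalone sketch of why the extra state matters (the function body here is invented purely for illustration):

#include <iostream>

enum class UpdateNodeStatus { Done, Exception, Cancelled };

UpdateNodeStatus updateNode(bool was_cancelled, bool threw)
{
    if (threw)
        return UpdateNodeStatus::Exception;   // caller should cancel the pipeline with an error
    if (was_cancelled)
        return UpdateNodeStatus::Cancelled;   // caller should stop quietly, no error reported
    return UpdateNodeStatus::Done;            // caller may schedule follow-up tasks
}

int main()
{
    switch (updateNode(/*was_cancelled=*/true, /*threw=*/false))
    {
        case UpdateNodeStatus::Done:      std::cout << "push tasks\n"; break;
        case UpdateNodeStatus::Exception: std::cout << "cancel with error\n"; break;
        case UpdateNodeStatus::Cancelled: std::cout << "stop quietly\n"; break;
    }
}

A plain bool collapsed the last two cases into `false`, which is exactly the ambiguity this part of the diff removes.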
- bool updateNode(uint64_t pid, Queue & queue, Queue & async_queue); + UpdateNodeStatus updateNode(uint64_t pid, Queue & queue, Queue & async_queue); void cancel(bool cancel_all_processors = true); @@ -155,7 +162,7 @@ private: /// Update graph after processor (pid) returned ExpandPipeline status. /// All new nodes and nodes with updated ports are pushed into stack. - bool expandPipeline(std::stack & stack, uint64_t pid); + UpdateNodeStatus expandPipeline(std::stack & stack, uint64_t pid); std::shared_ptr processors; std::vector source_processors; diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 82cad471a29..72e1afaafaa 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -77,9 +77,13 @@ const Processors & PipelineExecutor::getProcessors() const return graph->getProcessors(); } -void PipelineExecutor::cancel() +void PipelineExecutor::cancel(ExecutionStatus reason) { - cancelled = true; + /// It is allowed to cancel not started query by user. + if (reason == ExecutionStatus::CancelledByUser) + tryUpdateExecutionStatus(ExecutionStatus::NotStarted, reason); + + tryUpdateExecutionStatus(ExecutionStatus::Executing, reason); finish(); graph->cancel(); } @@ -98,6 +102,11 @@ void PipelineExecutor::finish() tasks.finish(); } +bool PipelineExecutor::tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired) +{ + return execution_status.compare_exchange_strong(expected, desired); +} + void PipelineExecutor::execute(size_t num_threads, bool concurrency_control) { checkTimeLimit(); @@ -120,7 +129,7 @@ void PipelineExecutor::execute(size_t num_threads, bool concurrency_control) } catch (...) { - span.addAttribute(ExecutionStatus::fromCurrentException()); + span.addAttribute(DB::ExecutionStatus::fromCurrentException()); #ifndef NDEBUG LOG_TRACE(log, "Exception while executing query. Current state:\n{}", dumpPipeline()); @@ -169,7 +178,7 @@ bool PipelineExecutor::checkTimeLimitSoft() // We call cancel here so that all processors are notified and tasks waken up // so that the "break" is faster and doesn't wait for long events if (!continuing) - cancel(); + cancel(ExecutionStatus::CancelledByTimeout); return continuing; } @@ -195,7 +204,8 @@ void PipelineExecutor::finalizeExecution() { checkTimeLimit(); - if (cancelled) + auto status = execution_status.load(); + if (status == ExecutionStatus::CancelledByTimeout || status == ExecutionStatus::CancelledByUser) return; bool all_processors_finished = true; @@ -271,7 +281,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie break; if (!context.executeTask()) - cancel(); + cancel(ExecutionStatus::Exception); if (tasks.isFinished()) break; @@ -289,11 +299,13 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie Queue async_queue; /// Prepare processor after execution. - if (!graph->updateNode(context.getProcessorID(), queue, async_queue)) - cancel(); + auto status = graph->updateNode(context.getProcessorID(), queue, async_queue); + if (status == ExecutingGraph::UpdateNodeStatus::Exception) + cancel(ExecutionStatus::Exception); /// Push other tasks to global queue. 
- tasks.pushTasks(queue, async_queue, context); + if (status == ExecutingGraph::UpdateNodeStatus::Done) + tasks.pushTasks(queue, async_queue, context); } #ifndef NDEBUG @@ -309,7 +321,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie { /// spawnThreads can throw an exception, for example CANNOT_SCHEDULE_TASK. /// We should cancel execution properly before rethrow. - cancel(); + cancel(ExecutionStatus::Exception); throw; } @@ -328,6 +340,7 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie void PipelineExecutor::initializeExecution(size_t num_threads, bool concurrency_control) { is_execution_initialized = true; + tryUpdateExecutionStatus(ExecutionStatus::NotStarted, ExecutionStatus::Executing); size_t use_threads = num_threads; @@ -393,7 +406,7 @@ void PipelineExecutor::executeImpl(size_t num_threads, bool concurrency_control) { /// If finished_flag is not set, there was an exception. /// Cancel execution in this case. - cancel(); + cancel(ExecutionStatus::Exception); if (pool) pool->wait(); } @@ -432,7 +445,7 @@ String PipelineExecutor::dumpPipeline() const } } - std::vector statuses; + std::vector> statuses; std::vector proc_list; statuses.reserve(graph->nodes.size()); proc_list.reserve(graph->nodes.size()); diff --git a/src/Processors/Executors/PipelineExecutor.h b/src/Processors/Executors/PipelineExecutor.h index ae119355cb5..79d0a29d4e1 100644 --- a/src/Processors/Executors/PipelineExecutor.h +++ b/src/Processors/Executors/PipelineExecutor.h @@ -48,8 +48,20 @@ public: const Processors & getProcessors() const; + enum class ExecutionStatus + { + NotStarted, + Executing, + Finished, + Exception, + CancelledByUser, + CancelledByTimeout, + }; + /// Cancel execution. May be called from another thread. - void cancel(); + void cancel() { cancel(ExecutionStatus::CancelledByUser); } + + ExecutionStatus getExecutionStatus() const { return execution_status.load(); } /// Cancel processors which only read data from source. May be called from another thread. void cancelReading(); @@ -81,7 +93,7 @@ private: /// system.opentelemetry_span_log bool trace_processors = false; - std::atomic_bool cancelled = false; + std::atomic execution_status = ExecutionStatus::NotStarted; std::atomic_bool cancelled_reading = false; LoggerPtr log = getLogger("PipelineExecutor"); @@ -105,6 +117,10 @@ private: void executeStepImpl(size_t thread_num, std::atomic_bool * yield_flag = nullptr); void executeSingleThread(size_t thread_num); void finish(); + void cancel(ExecutionStatus reason); + + /// If execution_status == from, change it to desired. 
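The declaration that follows is implemented with std::atomic compare_exchange_strong (see the .cpp hunk above): the transition succeeds only if the current status still equals the expected one, so the first reason to stop the query wins even when several threads race. A standalone sketch of the same transition logic, reusing the enum values from this header:

#include <atomic>
#include <iostream>

enum class ExecutionStatus { NotStarted, Executing, Finished, Exception, CancelledByUser, CancelledByTimeout };

std::atomic<ExecutionStatus> execution_status{ExecutionStatus::NotStarted};

bool tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired)
{
    return execution_status.compare_exchange_strong(expected, desired);
}

int main()
{
    tryUpdateExecutionStatus(ExecutionStatus::NotStarted, ExecutionStatus::Executing);

    bool cancelled = tryUpdateExecutionStatus(ExecutionStatus::Executing, ExecutionStatus::CancelledByUser);
    bool failed    = tryUpdateExecutionStatus(ExecutionStatus::Executing, ExecutionStatus::Exception);

    // Only the first transition succeeds; the second sees CancelledByUser and is rejected.
    std::cout << cancelled << ' ' << failed << '\n';   // prints: 1 0
}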
+ bool tryUpdateExecutionStatus(ExecutionStatus expected, ExecutionStatus desired); String dumpPipeline() const; }; diff --git a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp index 830a96533ed..866d224a08d 100644 --- a/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingAsyncPipelineExecutor.cpp @@ -15,6 +15,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int QUERY_WAS_CANCELLED; } class PushingAsyncSource : public ISource @@ -176,6 +177,16 @@ void PushingAsyncPipelineExecutor::start() data->thread = ThreadFromGlobalPool(std::move(func)); } +[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status) +{ + if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout + || status == PipelineExecutor::ExecutionStatus::CancelledByUser) + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); +} + void PushingAsyncPipelineExecutor::push(Chunk chunk) { if (!started) @@ -185,8 +196,7 @@ void PushingAsyncPipelineExecutor::push(Chunk chunk) data->rethrowExceptionIfHas(); if (!is_pushed) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingAsyncPipelineExecutor was finished before all data was inserted"); + throwOnExecutionStatus(data->executor->getExecutionStatus()); } void PushingAsyncPipelineExecutor::push(Block block) diff --git a/src/Processors/Executors/PushingPipelineExecutor.cpp b/src/Processors/Executors/PushingPipelineExecutor.cpp index 696932932df..7a1c0111a3a 100644 --- a/src/Processors/Executors/PushingPipelineExecutor.cpp +++ b/src/Processors/Executors/PushingPipelineExecutor.cpp @@ -11,6 +11,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int QUERY_WAS_CANCELLED; } class PushingSource : public ISource @@ -80,6 +81,15 @@ const Block & PushingPipelineExecutor::getHeader() const return pushing_source->getPort().getHeader(); } +[[noreturn]] static void throwOnExecutionStatus(PipelineExecutor::ExecutionStatus status) +{ + if (status == PipelineExecutor::ExecutionStatus::CancelledByTimeout + || status == PipelineExecutor::ExecutionStatus::CancelledByUser) + throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query was cancelled"); + + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); +} void PushingPipelineExecutor::start() { @@ -91,8 +101,7 @@ void PushingPipelineExecutor::start() executor->setReadProgressCallback(pipeline.getReadProgressCallback()); if (!executor->executeStep(&input_wait_flag)) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); + throwOnExecutionStatus(executor->getExecutionStatus()); } void PushingPipelineExecutor::push(Chunk chunk) @@ -103,8 +112,7 @@ void PushingPipelineExecutor::push(Chunk chunk) pushing_source->setData(std::move(chunk)); if (!executor->executeStep(&input_wait_flag)) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Pipeline for PushingPipelineExecutor was finished before all data was inserted"); + throwOnExecutionStatus(executor->getExecutionStatus()); } void PushingPipelineExecutor::push(Block block) diff --git a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp 
b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp index ed91913de4d..77d5867c554 100644 --- a/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp +++ b/src/Processors/Formats/Impl/ArrowColumnToCHColumn.cpp @@ -133,16 +133,31 @@ static ColumnWithTypeAndName readColumnWithStringData(const std::shared_ptr buffer = chunk.value_data(); const size_t chunk_length = chunk.length(); - for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) + const size_t null_count = chunk.null_count(); + if (null_count == 0) { - if (!chunk.IsNull(offset_i) && buffer) + for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) { const auto * raw_data = buffer->data() + chunk.value_offset(offset_i); column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i)); - } - column_chars_t.emplace_back('\0'); + column_chars_t.emplace_back('\0'); - column_offsets.emplace_back(column_chars_t.size()); + column_offsets.emplace_back(column_chars_t.size()); + } + } + else + { + for (size_t offset_i = 0; offset_i != chunk_length; ++offset_i) + { + if (!chunk.IsNull(offset_i) && buffer) + { + const auto * raw_data = buffer->data() + chunk.value_offset(offset_i); + column_chars_t.insert_assume_reserved(raw_data, raw_data + chunk.value_length(offset_i)); + } + column_chars_t.emplace_back('\0'); + + column_offsets.emplace_back(column_chars_t.size()); + } } } return {std::move(internal_column), std::move(internal_type), column_name}; @@ -743,6 +758,15 @@ static ColumnWithTypeAndName readNonNullableColumnFromArrowColumn( case TypeIndex::IPv6: return readIPv6ColumnFromBinaryData(arrow_column, column_name); /// ORC format outputs big integers as binary column, because there is no fixed binary in ORC. + /// + /// When ORC/Parquet file says the type is "byte array" or "fixed len byte array", + /// but the clickhouse query says to interpret the column as e.g. Int128, it + /// may mean one of two things: + /// * The byte array is the 16 bytes of Int128, little-endian. + /// * The byte array is an ASCII string containing the Int128 formatted in base 10. + /// There's no reliable way to distinguish these cases. We just guess: if the + /// byte array is variable-length, and the length is different from sizeof(type), + /// we parse as text, otherwise as binary. 
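A minimal standalone illustration of that guessing rule, using int64_t instead of Int128 for brevity (this is not the actual readColumnWithBigNumberFromBinaryData implementation, just the decision it describes): a payload whose byte length equals sizeof(T) is read as little-endian binary, anything else is parsed as base-10 text.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>

int64_t decodeBinaryOrText(const std::string & bytes)
{
    if (bytes.size() == sizeof(int64_t))
    {
        int64_t value = 0;
        std::memcpy(&value, bytes.data(), sizeof(value));  // raw binary payload
        return value;
    }
    return std::stoll(bytes);                              // ASCII decimal representation
}

int main()
{
    std::string binary(8, '\0');
    int64_t raw = 123456789;
    std::memcpy(binary.data(), &raw, sizeof(raw));

    std::cout << decodeBinaryOrText(binary) << '\n';   // 123456789 (binary path)
    std::cout << decodeBinaryOrText("-42") << '\n';    // -42 (text path)
}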
case TypeIndex::Int128: return readColumnWithBigNumberFromBinaryData(arrow_column, column_name, type_hint); case TypeIndex::UInt128: diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp index 9da5e533324..a72c6037619 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.cpp @@ -168,10 +168,11 @@ JSONAsObjectRowInputFormat::JSONAsObjectRowInputFormat( const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_) : JSONAsRowInputFormat(header_, in_, params_, format_settings_) { - if (!isObject(header_.getByPosition(0).type)) + const auto & type = header_.getByPosition(0).type; + if (!isObject(type) && !isObjectDeprecated(type)) throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Input format JSONAsObject is only suitable for tables with a single column of type Object but the column type is {}", - header_.getByPosition(0).type->getName()); + "Input format JSONAsObject is only suitable for tables with a single column of type Object/JSON but the column type is {}", + type->getName()); } void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column) @@ -186,13 +187,13 @@ Chunk JSONAsObjectRowInputFormat::getChunkForCount(size_t rows) return Chunk({std::move(column)}, rows); } -JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings) +JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings_) : settings(settings_) { - if (!settings.json.allow_object_type) + if (!settings.json.allow_deprecated_object_type && !settings.json.allow_json_type) throw Exception( ErrorCodes::ILLEGAL_COLUMN, - "Cannot infer the data structure in JSONAsObject format because experimental Object type is not allowed. Set setting " - "allow_experimental_object_type = 1 in order to allow it"); + "Cannot infer the data structure in JSONAsObject format because experimental Object/JSON type is not allowed. 
Set setting " + "allow_experimental_object_type = 1 or allow_experimental_json_type=1 in order to allow it"); } void registerInputFormatJSONAsString(FormatFactory & factory) diff --git a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h index 5eaa88182b7..f33108472de 100644 --- a/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONAsStringRowInputFormat.h @@ -5,6 +5,7 @@ #include #include #include +#include #include namespace DB @@ -70,12 +71,17 @@ public: class JSONAsObjectExternalSchemaReader : public IExternalSchemaReader { public: - explicit JSONAsObjectExternalSchemaReader(const FormatSettings & settings); + explicit JSONAsObjectExternalSchemaReader(const FormatSettings & settings_); NamesAndTypesList readSchema() override { - return {{"json", std::make_shared("json", false)}}; + if (settings.json.allow_json_type) + return {{"json", std::make_shared(DataTypeObject::SchemaFormat::JSON)}}; + return {{"json", std::make_shared("json", false)}}; } + +private: + FormatSettings settings; }; } diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 58bec8120f1..e68286bfcc5 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1143,24 +1143,42 @@ readColumnWithStringData(const orc::ColumnVectorBatch * orc_column, const orc::T reserver_size += 1; } - column_chars_t.reserve(reserver_size); - column_offsets.reserve(orc_str_column->numElements); + column_chars_t.resize_exact(reserver_size); + column_offsets.resize_exact(orc_str_column->numElements); size_t curr_offset = 0; - for (size_t i = 0; i < orc_str_column->numElements; ++i) + if (!orc_str_column->hasNulls) { - if (!orc_str_column->hasNulls || orc_str_column->notNull[i]) + for (size_t i = 0; i < orc_str_column->numElements; ++i) { const auto * buf = orc_str_column->data[i]; size_t buf_size = orc_str_column->length[i]; - column_chars_t.insert_assume_reserved(buf, buf + buf_size); + memcpy(&column_chars_t[curr_offset], buf, buf_size); curr_offset += buf_size; + + column_chars_t[curr_offset] = 0; + ++curr_offset; + + column_offsets[i] = curr_offset; } + } + else + { + for (size_t i = 0; i < orc_str_column->numElements; ++i) + { + if (orc_str_column->notNull[i]) + { + const auto * buf = orc_str_column->data[i]; + size_t buf_size = orc_str_column->length[i]; + memcpy(&column_chars_t[curr_offset], buf, buf_size); + curr_offset += buf_size; + } - column_chars_t.push_back(0); - ++curr_offset; + column_chars_t[curr_offset] = 0; + ++curr_offset; - column_offsets.push_back(curr_offset); + column_offsets[i] = curr_offset; + } } return {std::move(internal_column), std::move(internal_type), column_name}; } diff --git a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index bc5e8292192..1f213fef731 100644 --- a/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -25,6 +25,7 @@ #include #include #include +#include namespace CurrentMetrics { @@ -54,7 +55,7 @@ namespace ErrorCodes } \ } while (false) -/// Decode min/max value from column chunk statistics. +/// Decode min/max value from column chunk statistics. Returns Null if missing or unsupported. 
/// /// There are two questionable decisions in this implementation: /// * We parse the value from the encoded byte string instead of casting the parquet::Statistics @@ -62,7 +63,7 @@ namespace ErrorCodes /// * We dispatch based on the parquet logical+converted+physical type instead of the ClickHouse type. /// The idea is that this is similar to what we'll have to do when reimplementing Parquet parsing in /// ClickHouse instead of using Arrow (for speed). So, this is an exercise in parsing Parquet manually. -static std::optional decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr) +static Field decodePlainParquetValueSlow(const std::string & data, parquet::Type::type physical_type, const parquet::ColumnDescriptor & descr, TypeIndex type_hint) { using namespace parquet; @@ -118,8 +119,6 @@ static std::optional decodePlainParquetValueSlow(const std::string & data if (data.size() != size || size < 1 || size > 32) throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Unexpected decimal size: {} (actual {})", size, data.size()); - /// For simplicity, widen all decimals to 256-bit. It should compare correctly with values - /// of different bitness. Int256 val = 0; memcpy(&val, data.data(), size); if (big_endian) @@ -128,7 +127,19 @@ static std::optional decodePlainParquetValueSlow(const std::string & data if (size < 32 && (val >> (size * 8 - 1)) != 0) val |= ~((Int256(1) << (size * 8)) - 1); - return Field(DecimalField(Decimal256(val), static_cast(scale))); + auto narrow = [&](auto x) -> Field + { + memcpy(&x, &val, sizeof(x)); + return Field(DecimalField(x, static_cast(scale))); + }; + if (size <= 4) + return narrow(Decimal32(0)); + else if (size <= 8) + return narrow(Decimal64(0)); + else if (size <= 16) + return narrow(Decimal128(0)); + else + return narrow(Decimal256(0)); } while (false); @@ -185,8 +196,6 @@ static std::optional decodePlainParquetValueSlow(const std::string & data return Field(val); } - /// Strings. - if (physical_type == Type::type::BYTE_ARRAY || physical_type == Type::type::FIXED_LEN_BYTE_ARRAY) { /// Arrow's parquet decoder handles missing min/max values slightly incorrectly. @@ -213,14 +222,31 @@ static std::optional decodePlainParquetValueSlow(const std::string & data /// TODO: Remove this workaround either when we implement our own Parquet decoder that /// doesn't have this bug, or if it's fixed in Arrow. if (data.empty()) - return std::nullopt; + return Field(); + /// Long integers, encoded either as text or as little-endian bytes. + /// The parquet file doesn't know that it's numbers, so the min/max are produced by comparing + /// strings lexicographically. So these min and max are mostly useless to us. + /// There's one case where they're not useless: min == max; currently we don't make use of this. + switch (type_hint) + { + case TypeIndex::UInt128: + case TypeIndex::UInt256: + case TypeIndex::Int128: + case TypeIndex::Int256: + case TypeIndex::IPv6: + return Field(); + default: break; + } + + /// Strings. return Field(data); } - /// This one's deprecated in Parquet. + /// This type is deprecated in Parquet. + /// TODO: But turns out it's still used in practice, we should support it. if (physical_type == Type::type::INT96) - throw Exception(ErrorCodes::CANNOT_PARSE_NUMBER, "Parquet INT96 type is deprecated and not supported"); + return Field(); /// Integers. 
@@ -260,6 +286,9 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa if (!s) continue; + if (s->descr()->schema_node()->is_repeated()) + continue; + auto path = c->path_in_schema()->ToDotVector(); if (path.size() != 1) continue; // compound types not supported @@ -283,15 +312,13 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa continue; auto stats = it->second; - auto default_value = [&]() -> Field - { - DataTypePtr type = header.getByPosition(idx).type; - if (type->lowCardinality()) - type = assert_cast(*type).getDictionaryType(); - if (type->isNullable()) - type = assert_cast(*type).getNestedType(); - return type->getDefault(); - }; + DataTypePtr type = header.getByPosition(idx).type; + if (type->lowCardinality()) + type = assert_cast(*type).getDictionaryType(); + if (type->isNullable()) + type = assert_cast(*type).getNestedType(); + Field default_value = type->getDefault(); + TypeIndex type_index = type->getTypeId(); /// Only primitive fields are supported, not arrays, maps, tuples, or Nested. /// Arrays, maps, and Nested can't be meaningfully supported because Parquet only has min/max @@ -299,14 +326,47 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa /// Same limitation for tuples, but maybe it would make sense to have some kind of tuple /// expansion in KeyCondition to accept ranges per element instead of whole tuple. - std::optional min; - std::optional max; + Field min; + Field max; if (stats->HasMinMax()) { try { - min = decodePlainParquetValueSlow(stats->EncodeMin(), stats->physical_type(), *stats->descr()); - max = decodePlainParquetValueSlow(stats->EncodeMax(), stats->physical_type(), *stats->descr()); + min = decodePlainParquetValueSlow(stats->EncodeMin(), stats->physical_type(), *stats->descr(), type_index); + max = decodePlainParquetValueSlow(stats->EncodeMax(), stats->physical_type(), *stats->descr(), type_index); + + /// If the data type in parquet file substantially differs from the requested data type, + /// it's sometimes correct to just typecast the min/max values. + /// Other times it's incorrect, e.g.: + /// INSERT INTO FUNCTION file('t.parquet', Parquet, 'x String') VALUES ('1'), ('100'), ('2'); + /// SELECT * FROM file('t.parquet', Parquet, 'x Int64') WHERE x >= 3; + /// If we just typecast min/max from string to integer, this query will incorrectly return empty result. + /// Allow conversion in some simple cases, otherwise ignore the min/max values. + auto min_type = min.getType(); + auto max_type = max.getType(); + min = convertFieldToType(min, *type); + max = convertFieldToType(max, *type); + auto ok_cast = [&](Field::Types::Which from, Field::Types::Which to) -> bool + { + if (from == to) + return true; + /// Decimal -> wider decimal. + if (Field::isDecimal(from) || Field::isDecimal(to)) + return Field::isDecimal(from) && Field::isDecimal(to) && to >= from; + /// Integer -> IP. + if (to == Field::Types::IPv4) + return from == Field::Types::UInt64; + /// Disable index for everything else, especially string <-> number. 
+ return false; + }; + if (!(ok_cast(min_type, min.getType()) && ok_cast(max_type, max.getType())) && + !(min == max) && + !(min_type == Field::Types::Int64 && min.getType() == Field::Types::UInt64 && min.safeGet() >= 0) && + !(max_type == Field::Types::UInt64 && max.getType() == Field::Types::Int64 && max.safeGet() <= UInt64(INT64_MAX))) + { + min = Field(); + max = Field(); + } } catch (Exception & e) { @@ -328,7 +388,7 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa { /// Single-point range containing either the default value of one of the infinities. if (null_as_default) - hyperrectangle[idx].right = hyperrectangle[idx].left = default_value(); + hyperrectangle[idx].right = hyperrectangle[idx].left = default_value; else hyperrectangle[idx].right = hyperrectangle[idx].left; continue; @@ -339,32 +399,31 @@ static std::vector getHyperrectangleForRowGroup(const parquet::FileMetaDa if (null_as_default) { /// Make sure the range contains the default value. - Field def = default_value(); - if (min.has_value() && applyVisitor(FieldVisitorAccurateLess(), def, *min)) - min = def; - if (max.has_value() && applyVisitor(FieldVisitorAccurateLess(), *max, def)) - max = def; + if (!min.isNull() && applyVisitor(FieldVisitorAccurateLess(), default_value, min)) + min = default_value; + if (!max.isNull() && applyVisitor(FieldVisitorAccurateLess(), max, default_value)) + max = default_value; } else { /// Make sure the range reaches infinity on at least one side. - if (min.has_value() && max.has_value()) - min.reset(); + if (!min.isNull() && !max.isNull()) + min = Field(); } } else { /// If the column doesn't have nulls, exclude both infinities. - if (!min.has_value()) + if (min.isNull()) hyperrectangle[idx].left_included = false; - if (!max.has_value()) + if (max.isNull()) hyperrectangle[idx].right_included = false; } - if (min.has_value()) - hyperrectangle[idx].left = std::move(min.value()); - if (max.has_value()) - hyperrectangle[idx].right = std::move(max.value()); + if (!min.isNull()) + hyperrectangle[idx].left = std::move(min); + if (!max.isNull()) + hyperrectangle[idx].right = std::move(max); } return hyperrectangle; diff --git a/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp index edb4d662d8b..fc595a7b565 100644 --- a/src/Processors/IProcessor.cpp +++ b/src/Processors/IProcessor.cpp @@ -55,9 +55,12 @@ void IProcessor::dump() const } -std::string IProcessor::statusToName(Status status) +std::string IProcessor::statusToName(std::optional status) { - switch (status) + if (status == std::nullopt) + return "NotStarted"; + + switch (*status) { case Status::NeedData: return "NeedData"; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index f1ce044d92f..02b8a3daa28 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -162,7 +162,7 @@ public: ExpandPipeline, }; - static std::string statusToName(Status status); + static std::string statusToName(std::optional status); /** Method 'prepare' is responsible for all cheap ("instantaneous": O(1) of data volume, no wait) calculations. 
* diff --git a/src/Processors/Merges/AggregatingSortedTransform.h b/src/Processors/Merges/AggregatingSortedTransform.h index c6d7e844c65..c96ad3db525 100644 --- a/src/Processors/Merges/AggregatingSortedTransform.h +++ b/src/Processors/Merges/AggregatingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event AggregatingSortedMilliseconds; +} + namespace DB { @@ -29,6 +34,11 @@ public: } String getName() const override { return "AggregatingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::AggregatingSortedMilliseconds, "Aggregated sorted", getLogger("AggregatingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index 53c103e7038..908994e1851 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -30,6 +30,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + /// Stores information for aggregation of SimpleAggregateFunction columns struct SimpleAggregateDescription { diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp index 86675bcb237..477566d8a94 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp @@ -126,6 +126,9 @@ IMergingAlgorithm::Status FinishAggregatingInOrderAlgorithm::merge() Chunk FinishAggregatingInOrderAlgorithm::prepareToMerge() { + total_merged_rows += accumulated_rows; + total_merged_bytes += accumulated_bytes; + accumulated_rows = 0; accumulated_bytes = 0; diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h index cc6578e79be..c34028b1cba 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h @@ -50,6 +50,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; } + private: Chunk prepareToMerge(); void addToAggregation(); @@ -92,6 +94,9 @@ private: UInt64 chunk_num = 0; size_t accumulated_rows = 0; size_t accumulated_bytes = 0; + + size_t total_merged_rows = 0; + size_t total_merged_bytes = 0; }; } diff --git a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h index aaa3859efb6..cb2775c968d 100644 --- a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h @@ -33,6 +33,8 @@ public: const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; } Status merge() override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + struct ColumnsDefinition { size_t path_column_num; diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h index 9a1c7c24270..83f11232b71 100644 --- 
a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB { @@ -65,6 +65,15 @@ public: IMergingAlgorithm() = default; virtual ~IMergingAlgorithm() = default; + + struct MergedStats + { + UInt64 bytes = 0; + UInt64 rows = 0; + UInt64 blocks = 0; + }; + + virtual MergedStats getMergedStats() const = 0; }; // TODO: use when compile with clang which could support it diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h index bc1aafe93f7..1725108ac5d 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h @@ -16,6 +16,8 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + private: Block header; SortDescription description; diff --git a/src/Processors/Merges/Algorithms/MergedData.h b/src/Processors/Merges/Algorithms/MergedData.h index c5bb074bb0c..8f47f89d8ee 100644 --- a/src/Processors/Merges/Algorithms/MergedData.h +++ b/src/Processors/Merges/Algorithms/MergedData.h @@ -183,6 +183,8 @@ public: UInt64 totalAllocatedBytes() const { return total_allocated_bytes; } UInt64 maxBlockSize() const { return max_block_size; } + IMergingAlgorithm::MergedStats getMergedStats() const { return {.bytes = total_allocated_bytes, .rows = total_merged_rows, .blocks = total_chunks}; } + virtual ~MergedData() = default; protected: diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index bcb111baadf..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -31,7 +31,7 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - const MergedData & getMergedData() const { return merged_data; } + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } private: Block header; diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h index 664b171c4b9..74b4e397831 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h @@ -30,6 +30,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + struct AggregateDescription; struct MapDescription; diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 4479ac82f66..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event CollapsingSortedMilliseconds; +} + namespace DB { @@ -36,6 +41,11 @@ public: } String getName() const override { return "CollapsingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::CollapsingSortedMilliseconds, "Collapsed sorted", getLogger("CollapsingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/IMergingTransform.h 
b/src/Processors/Merges/IMergingTransform.h index be629271736..e5cd3bdde46 100644 --- a/src/Processors/Merges/IMergingTransform.h +++ b/src/Processors/Merges/IMergingTransform.h @@ -2,7 +2,10 @@ #include #include +#include #include +#include +#include namespace DB { @@ -110,6 +113,8 @@ public: void work() override { + Stopwatch watch{CLOCK_MONOTONIC_COARSE}; + if (!state.init_chunks.empty()) algorithm.initialize(std::move(state.init_chunks)); @@ -147,6 +152,8 @@ public: // std::cerr << "Finished" << std::endl; state.is_finished = true; } + + merging_elapsed_ns += watch.elapsedNanoseconds(); } protected: @@ -156,7 +163,33 @@ protected: Algorithm algorithm; /// Profile info. - Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + UInt64 merging_elapsed_ns = 0; + + void logMergedStats(ProfileEvents::Event elapsed_ms_event, std::string_view transform_message, LoggerPtr log) const + { + auto stats = algorithm.getMergedStats(); + + UInt64 elapsed_ms = merging_elapsed_ns / 1000000LL; + ProfileEvents::increment(elapsed_ms_event, elapsed_ms); + + /// Don't print info for small parts (< 1M rows) + if (stats.rows < 1000000) + return; + + double seconds = static_cast(merging_elapsed_ns) / 1000000000ULL; + + if (seconds == 0.0) + { + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in 0 sec.", + transform_message, stats.blocks, stats.rows, stats.bytes); + } + else + { + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.", + transform_message, stats.blocks, stats.rows, stats.bytes, + seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds)); + } + } private: using IMergingTransformBase::state; diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 338b1ff7935..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -1,9 +1,12 @@ #include #include #include - #include -#include + +namespace ProfileEvents +{ + extern const Event MergingSortedMilliseconds; +} namespace DB { @@ -18,7 +21,6 @@ MergingSortedTransform::MergingSortedTransform( UInt64 limit_, bool always_read_till_end_, WriteBuffer * out_row_sources_buf_, - bool quiet_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( @@ -37,7 +39,6 @@ MergingSortedTransform::MergingSortedTransform( limit_, out_row_sources_buf_, use_average_block_sizes) - , quiet(quiet_) { } @@ -48,22 +49,7 @@ void MergingSortedTransform::onNewInput() void MergingSortedTransform::onFinish() { - if (quiet) - return; - - const auto & merged_data = algorithm.getMergedData(); - - auto log = getLogger("MergingSortedTransform"); - - double seconds = total_stopwatch.elapsedSeconds(); - - if (seconds == 0.0) - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in 0 sec.", merged_data.totalChunks(), merged_data.totalMergedRows()); - else - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec", - merged_data.totalChunks(), merged_data.totalMergedRows(), seconds, - merged_data.totalMergedRows() / seconds, - ReadableSize(merged_data.totalAllocatedBytes() / seconds)); + logMergedStats(ProfileEvents::MergingSortedMilliseconds, "Merged sorted", getLogger("MergingSortedTransform")); } } diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 2b53939f309..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -21,7 +21,6 @@ public: UInt64 
limit_ = 0, bool always_read_till_end_ = false, WriteBuffer * out_row_sources_buf_ = nullptr, - bool quiet_ = false, bool use_average_block_sizes = false, bool have_all_inputs_ = true); @@ -30,9 +29,6 @@ public: protected: void onNewInput() override; void onFinish() override; - -private: - bool quiet = false; }; } diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index 2657987f161..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event ReplacingSortedMilliseconds; +} namespace DB { @@ -38,6 +42,11 @@ public: } String getName() const override { return "ReplacingSorted"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::ReplacingSortedMilliseconds, "Replaced sorted", getLogger("ReplacingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/SummingSortedTransform.h b/src/Processors/Merges/SummingSortedTransform.h index 70ddebfea95..d7c20223d7e 100644 --- a/src/Processors/Merges/SummingSortedTransform.h +++ b/src/Processors/Merges/SummingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event SummingSortedMilliseconds; +} + namespace DB { @@ -33,6 +38,11 @@ public: } String getName() const override { return "SummingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::SummingSortedMilliseconds, "Summed sorted", getLogger("SummingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 18244469bd7..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event VersionedCollapsingSortedMilliseconds; +} namespace DB { @@ -33,6 +37,11 @@ public: } String getName() const override { return "VersionedCollapsingTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::VersionedCollapsingSortedMilliseconds, "Versioned collapsed sorted", getLogger("VersionedCollapsingTransform")); + } }; } diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index a12fce95b10..63c10a11913 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -49,7 +49,7 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) case TypeIndex::Float32: case TypeIndex::Float64: case TypeIndex::Nullable: - case TypeIndex::Object: + case TypeIndex::ObjectDeprecated: return false; case TypeIndex::Array: { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 348019d7d10..734e67bda24 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -352,7 +352,15 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas( /// We have a special logic for local replica. It has to read less data, because in some cases it should /// merge states of aggregate functions or do some other important stuff other than reading from Disk. 
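The hunk that follows clamps min_marks_for_concurrent_read before it is scaled by the floating-point multiplier, so that unrealistically large values (seen with randomized test settings) cannot overflow when multiplied. A standalone sketch of that clamping, with uint64_t standing in for the elided integer type and the non-convertible branch simplified (the real code handles it differently):

#include <cstdint>
#include <iostream>
#include <limits>

uint64_t scaleMarks(uint64_t min_marks, double multiplier)
{
    const uint64_t limit = std::numeric_limits<uint64_t>::max() >> 1;
    if (min_marks > limit)
    {
        min_marks = limit;   // clamp unrealistically large values
        multiplier = 1.0;    // and do not scale them any further
    }
    double result = static_cast<double>(min_marks) * multiplier;
    if (result >= static_cast<double>(std::numeric_limits<uint64_t>::max()))
        return min_marks;    // scaling would overflow, keep the clamped value
    return static_cast<uint64_t>(result);
}

int main()
{
    std::cout << scaleMarks(1024, 2.0) << '\n';                                  // 2048
    std::cout << scaleMarks(std::numeric_limits<uint64_t>::max(), 8.0) << '\n';  // clamped
}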
- const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; + auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; + const auto min_marks_for_concurrent_read_limit = std::numeric_limits::max() >> 1; + if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit) + { + /// Limit min marks to read if it is too big; this happened in tests due to settings randomization + pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit; + multiplier = 1.0f; + } + if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo(result)) pool_settings.min_marks_for_concurrent_read = static_cast(result); else @@ -521,7 +529,15 @@ Pipe ReadFromMergeTree::readInOrder( .number_of_current_replica = client_info.number_of_current_replica, }; - const auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; + auto multiplier = context->getSettingsRef().parallel_replicas_single_task_marks_count_multiplier; + const auto min_marks_for_concurrent_read_limit = std::numeric_limits::max() >> 1; + if (pool_settings.min_marks_for_concurrent_read > min_marks_for_concurrent_read_limit) + { + /// Limit min marks to read if it is too big; this happened in tests due to settings randomization + pool_settings.min_marks_for_concurrent_read = min_marks_for_concurrent_read_limit; + multiplier = 1.0f; + } + if (auto result = pool_settings.min_marks_for_concurrent_read * multiplier; canConvertTo(result)) pool_settings.min_marks_for_concurrent_read = static_cast(result); else diff --git a/src/Processors/Sources/PostgreSQLSource.cpp b/src/Processors/Sources/PostgreSQLSource.cpp index a3d6fd691d8..b9bda46bd10 100644 --- a/src/Processors/Sources/PostgreSQLSource.cpp +++ b/src/Processors/Sources/PostgreSQLSource.cpp @@ -35,9 +35,9 @@ PostgreSQLSource::PostgreSQLSource( const Block & sample_block, UInt64 max_block_size_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) , max_block_size(max_block_size_) , connection_holder(std::move(connection_holder_)) + , query_str(query_str_) { init(sample_block); } @@ -51,10 +51,10 @@ PostgreSQLSource::PostgreSQLSource( UInt64 max_block_size_, bool auto_commit_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) - , tx(std::move(tx_)) , max_block_size(max_block_size_) , auto_commit(auto_commit_) + , query_str(query_str_) + , tx(std::move(tx_)) { init(sample_block); } @@ -204,15 +204,15 @@ PostgreSQLSource::~PostgreSQLSource() */ stream->close(); } - - stream.reset(); - tx.reset(); } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); } + stream.reset(); + tx.reset(); + if (connection_holder) connection_holder->setBroken(); } diff --git a/src/Processors/Sources/PostgreSQLSource.h b/src/Processors/Sources/PostgreSQLSource.h index 8a648ae8bb5..319c5d8d7c2 100644 --- a/src/Processors/Sources/PostgreSQLSource.h +++ b/src/Processors/Sources/PostgreSQLSource.h @@ -38,14 +38,12 @@ protected: UInt64 max_block_size_, bool auto_commit_); - String query_str; - std::shared_ptr tx; - std::unique_ptr stream; - Status prepare() override; - void onStart(); Chunk generate() override; + + void onStart(); + void onFinish(); private: @@ -61,6 +59,12 @@ private: postgres::ConnectionHolderPtr connection_holder; std::unordered_map array_info; + +protected: + String query_str; + /// tx and stream must be destroyed before connection_holder.
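That comment, and the member reordering declared just below, rely on the C++ rule that non-static data members are destroyed in reverse order of declaration: because connection_holder is declared before tx and stream, it is destroyed last, so the transaction and the result stream are torn down while the connection is still alive. A standalone reminder of that rule:

#include <iostream>
#include <string>

struct Tracer
{
    std::string name;
    explicit Tracer(std::string name_) : name(std::move(name_)) {}
    ~Tracer() { std::cout << "destroying " << name << '\n'; }
};

struct Source
{
    Tracer connection_holder{"connection_holder"};  // declared first => destroyed last
    Tracer tx{"tx"};
    Tracer stream{"stream"};                        // declared last => destroyed first
};

int main()
{
    Source source;
    // On scope exit this prints: stream, tx, connection_holder.
}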
+ std::shared_ptr tx; + std::unique_ptr stream; }; diff --git a/src/Processors/Transforms/ColumnGathererTransform.cpp b/src/Processors/Transforms/ColumnGathererTransform.cpp index 15f8355bdc7..52fa42fdb51 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.cpp +++ b/src/Processors/Transforms/ColumnGathererTransform.cpp @@ -1,11 +1,15 @@ #include +#include #include #include #include #include #include -#include +namespace ProfileEvents +{ + extern const Event GatheringColumnMilliseconds; +} namespace DB { @@ -33,6 +37,13 @@ ColumnGathererStream::ColumnGathererStream( throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "There are no streams to gather"); } +void ColumnGathererStream::updateStats(const IColumn & column) +{ + merged_rows += column.size(); + merged_bytes += column.allocatedBytes(); + ++merged_blocks; +} + void ColumnGathererStream::initialize(Inputs inputs) { Columns source_columns; @@ -82,7 +93,9 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { res.addColumn(source_to_fully_copy->column); } - merged_rows += source_to_fully_copy->size; + + updateStats(*source_to_fully_copy->column); + source_to_fully_copy->pos = source_to_fully_copy->size; source_to_fully_copy = nullptr; return Status(std::move(res)); @@ -96,8 +109,7 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { next_required_source = 0; Chunk res; - merged_rows += sources.front().column->size(); - merged_bytes += sources.front().column->allocatedBytes(); + updateStats(*sources.front().column); res.addColumn(std::move(sources.front().column)); sources.front().pos = sources.front().size = 0; return Status(std::move(res)); @@ -123,8 +135,8 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() if (source_to_fully_copy && result_column->empty()) { Chunk res; - merged_rows += source_to_fully_copy->column->size(); - merged_bytes += source_to_fully_copy->column->allocatedBytes(); + updateStats(*source_to_fully_copy->column); + if (result_column->hasDynamicStructure()) { auto col = result_column->cloneEmpty(); @@ -140,13 +152,13 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() return Status(std::move(res)); } - auto col = result_column->cloneEmpty(); - result_column.swap(col); + auto return_column = result_column->cloneEmpty(); + result_column.swap(return_column); Chunk res; - merged_rows += col->size(); - merged_bytes += col->allocatedBytes(); - res.addColumn(std::move(col)); + updateStats(*return_column); + + res.addColumn(std::move(return_column)); return Status(std::move(res), row_sources_buf.eof() && !source_to_fully_copy); } @@ -185,31 +197,10 @@ ColumnGathererTransform::ColumnGathererTransform( toString(header.columns())); } -void ColumnGathererTransform::work() -{ - Stopwatch stopwatch; - IMergingTransform::work(); - elapsed_ns += stopwatch.elapsedNanoseconds(); -} - void ColumnGathererTransform::onFinish() { - auto merged_rows = algorithm.getMergedRows(); - auto merged_bytes = algorithm.getMergedRows(); - /// Don't print info for small parts (< 10M rows) - if (merged_rows < 10000000) - return; - - double seconds = static_cast(elapsed_ns) / 1000000000ULL; const auto & column_name = getOutputPort().getHeader().getByPosition(0).name; - - if (seconds == 0.0) - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in 0 sec.", - column_name, static_cast(merged_bytes) / merged_rows); - else - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) 
in {} sec., {} rows/sec., {}/sec.", - column_name, static_cast(merged_bytes) / merged_rows, seconds, - merged_rows / seconds, ReadableSize(merged_bytes / seconds)); + logMergedStats(ProfileEvents::GatheringColumnMilliseconds, fmt::format("Gathered column {}", column_name), log); } } diff --git a/src/Processors/Transforms/ColumnGathererTransform.h b/src/Processors/Transforms/ColumnGathererTransform.h index ec5691316ce..fbc9a6bfcc6 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.h +++ b/src/Processors/Transforms/ColumnGathererTransform.h @@ -72,10 +72,11 @@ public: template void gather(Column & column_res); - UInt64 getMergedRows() const { return merged_rows; } - UInt64 getMergedBytes() const { return merged_bytes; } + MergedStats getMergedStats() const override { return {.bytes = merged_bytes, .rows = merged_rows, .blocks = merged_blocks}; } private: + void updateStats(const IColumn & column); + /// Cache required fields struct Source { @@ -105,6 +106,7 @@ private: ssize_t next_required_source = -1; UInt64 merged_rows = 0; UInt64 merged_bytes = 0; + UInt64 merged_blocks = 0; }; class ColumnGathererTransform final : public IMergingTransform @@ -120,12 +122,8 @@ public: String getName() const override { return "ColumnGathererTransform"; } - void work() override; - protected: void onFinish() override; - UInt64 elapsed_ns = 0; - LoggerPtr log; }; diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index e96a75d277b..6abfa0fccd0 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -511,6 +511,16 @@ void MergeJoinAlgorithm::logElapsed(double seconds) stat.max_blocks_loaded); } +IMergingAlgorithm::MergedStats MergeJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = stat.num_bytes[0] + stat.num_bytes[1], + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + static void prepareChunk(Chunk & chunk) { if (!chunk) @@ -547,6 +557,7 @@ void MergeJoinAlgorithm::consume(Input & input, size_t source_num) { stat.num_blocks[source_num] += 1; stat.num_rows[source_num] += input.chunk.getNumRows(); + stat.num_bytes[source_num] += input.chunk.allocatedBytes(); } prepareChunk(input.chunk); @@ -1271,7 +1282,7 @@ MergeJoinTransform::MergeJoinTransform( void MergeJoinTransform::onFinish() { - algorithm.logElapsed(total_stopwatch.elapsedSeconds()); + algorithm.logElapsed(static_cast(merging_elapsed_ns) / 1000000000ULL); } } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index d37a0b9f3ae..8f74974af0f 100644 --- a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -245,6 +245,8 @@ public: void setAsofInequality(ASOFJoinInequality asof_inequality_); void logElapsed(double seconds); + MergedStats getMergedStats() const override; + private: std::optional handleAnyJoinState(); Status anyJoin(); @@ -280,6 +282,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ede13b29219..c45192e7118 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -185,7 +185,6 @@ void MergeSortingTransform::consume(Chunk chunk) if 
(!external_merging_sorted) { - bool quiet = false; bool have_all_inputs = false; bool use_average_block_sizes = false; @@ -199,7 +198,6 @@ void MergeSortingTransform::consume(Chunk chunk) limit, /*always_read_till_end_=*/ false, nullptr, - quiet, use_average_block_sizes, have_all_inputs); diff --git a/src/Processors/Transforms/PasteJoinTransform.cpp b/src/Processors/Transforms/PasteJoinTransform.cpp index d2fa7eed256..982a347a70f 100644 --- a/src/Processors/Transforms/PasteJoinTransform.cpp +++ b/src/Processors/Transforms/PasteJoinTransform.cpp @@ -58,6 +58,16 @@ static void prepareChunk(Chunk & chunk) chunk.setColumns(std::move(columns), num_rows); } +IMergingAlgorithm::MergedStats PasteJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = stat.num_bytes[0] + stat.num_bytes[1], + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + void PasteJoinAlgorithm::initialize(Inputs inputs) { if (inputs.size() != 2) diff --git a/src/Processors/Transforms/PasteJoinTransform.h b/src/Processors/Transforms/PasteJoinTransform.h index 6a7e65ee27c..c184f20362d 100644 --- a/src/Processors/Transforms/PasteJoinTransform.h +++ b/src/Processors/Transforms/PasteJoinTransform.h @@ -35,8 +35,7 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; Status merge() override; - - void logElapsed(double seconds); + MergedStats getMergedStats() const override; private: Chunk createBlockWithDefaults(size_t source_num); @@ -55,6 +54,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 85e6b2ec55e..bd11aa4cd28 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -1157,8 +1157,7 @@ void WindowTransform::appendChunk(Chunk & chunk) // Initialize output columns. for (auto & ws : workspaces) { - if (ws.window_function_impl) - block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); + block.casted_columns.push_back(ws.window_function_impl ? 
ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices) : nullptr); block.output_columns.push_back(ws.aggregate_function->getResultType() ->createColumn()); diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index f678d7984e8..befe5e28b5d 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -208,6 +208,12 @@ Block executePipeline(QueryPipeline && pipeline) template void assertColumnVectorEq(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const auto * actual = typeid_cast *>(block.getByName(name).column.get()); ASSERT_TRUE(actual) << "unexpected column type: " << block.getByName(name).column->dumpStructure() << "expected: " << typeid(ColumnVector).name(); @@ -230,6 +236,12 @@ void assertColumnVectorEq(const typename ColumnVector::Container & expected, template void assertColumnEq(const IColumn & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const ColumnPtr & actual = block.getByName(name).column; ASSERT_TRUE(checkColumn(*actual)); ASSERT_TRUE(checkColumn(expected)); diff --git a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp index bc22f249f97..f41a447049c 100644 --- a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp +++ b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp @@ -83,7 +83,7 @@ TEST(MergingSortedTest, SimpleBlockSizeTest) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); @@ -125,7 +125,7 @@ TEST(MergingSortedTest, MoreInterestingBlockSizes) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 679f72b85ff..2b9a7295198 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -103,6 +103,7 @@ namespace DB::ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int UNSUPPORTED_METHOD; extern const int USER_EXPIRED; + extern const int NETWORK_ERROR; } namespace @@ -254,8 +255,8 @@ void TCPHandler::runImpl() socket().setSendTimeout(send_timeout); socket().setNoDelay(true); - in = std::make_shared(socket(), read_event); - out = std::make_shared(socket(), write_event); + in = std::make_shared(socket(), read_event); + out = std::make_shared(socket(), write_event); /// Support for PROXY protocol if (parse_proxy_protocol && !receiveProxyHeader()) @@ -280,6 +281,48 @@ void TCPHandler::runImpl() if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); + { + /// Server side of chunked protocol negotiation. 
+ /// Server advertises its protocol capabilities (separately for the send and receive channels) by sending
+ /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional.
+ /// Non-optional types are strict, meaning the server supports only that type; optional means the
+ /// server prefers this type but can work with the opposite one.
+ /// The client selects which type it is going to use based on the settings from config or arguments,
+ /// and sends either a "chunked" or "notchunked" protocol request in the addendum section of the handshake.
+ /// The client can detect that the server's protocol capabilities are incompatible with its settings (for example,
+ /// the server strictly requires the chunked protocol but the client's settings allow only the notchunked one) - in such a case
+ /// the client should interrupt the connection. However, if the client continues with an incompatible protocol type request,
+ /// the server will send an appropriate exception and disconnect the client.
+
+ auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction)
+ {
+ bool chunked_srv = chunked_srv_str.starts_with("chunked");
+ bool optional_srv = chunked_srv_str.ends_with("_optional");
+ bool chunked_cl = chunked_cl_str.starts_with("chunked");
+
+ if (optional_srv)
+ return chunked_cl;
+
+ if (chunked_cl != chunked_srv)
+ throw NetException(
+ ErrorCodes::NETWORK_ERROR,
+ "Incompatible protocol: {} is {}, client requested {}",
+ direction,
+ chunked_srv ? "chunked" : "notchunked",
+ chunked_cl ? "chunked" : "notchunked");
+
+ return chunked_srv;
+ };
+
+ bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "notchunked"), proto_recv_chunked_cl, "send");
+ bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "notchunked"), proto_send_chunked_cl, "recv");
+
+ if (out_chunked)
+ out->enableChunked();
+ if (in_chunked)
+ in->enableChunked();
+ }
+
 if (!is_interserver_mode)
 {
 /// If session created, then settings in session context has been updated.
@@ -321,7 +364,7 @@ void TCPHandler::runImpl()
 {
 Stopwatch idle_time;
 UInt64 timeout_ms = std::min(poll_interval, idle_connection_timeout) * 1000000;
- while (tcp_server.isOpen() && !server.isCancelled() && !static_cast(*in).poll(timeout_ms))
+ while (tcp_server.isOpen() && !server.isCancelled() && !in->poll(timeout_ms))
 {
 if (idle_time.elapsedSeconds() > idle_connection_timeout)
 {
@@ -796,7 +839,7 @@ bool TCPHandler::readDataNext()
 /// We are waiting for a packet from the client. Thus, every `POLL_INTERVAL` seconds check whether we need to shut down.
 while (true)
 {
- if (static_cast(*in).poll(timeout_us))
+ if (in->poll(timeout_us))
 {
 /// If client disconnected.
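The negotiation rule above can be summarized in a few lines. The following standalone sketch is an editor's illustration only, not part of the patch, and negotiate_chunked is an invented name: an "_optional" server capability simply follows the client's request, while a strict capability must match it or the connection is rejected.

#include <stdexcept>
#include <string>

/// Illustrative helper mirroring the is_chunked lambda above; assumes C++20 starts_with/ends_with.
static bool negotiate_chunked(const std::string & server_caps, const std::string & client_request)
{
    bool server_chunked = server_caps.starts_with("chunked");
    bool server_optional = server_caps.ends_with("_optional");
    bool client_chunked = client_request.starts_with("chunked");

    if (server_optional)
        return client_chunked;  /// "chunked_optional" / "notchunked_optional": follow the client's choice
    if (client_chunked != server_chunked)
        throw std::runtime_error("Incompatible protocol");  /// strict capability, mismatching request
    return server_chunked;
}

/// For example: negotiate_chunked("chunked_optional", "notchunked") == false,
/// while negotiate_chunked("notchunked", "chunked") throws.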
if (in->eof()) @@ -1186,6 +1229,8 @@ void TCPHandler::processTablesStatusRequest() } response.write(*out, client_tcp_protocol_version); + + out->finishChunk(); } void TCPHandler::receiveUnexpectedTablesStatusRequest() @@ -1206,6 +1251,8 @@ void TCPHandler::sendPartUUIDs() writeVarUInt(Protocol::Server::PartUUIDs, *out); writeVectorBinary(uuids, *out); + + out->finishChunk(); out->next(); } } @@ -1214,6 +1261,8 @@ void TCPHandler::sendPartUUIDs() void TCPHandler::sendReadTaskRequestAssumeLocked() { writeVarUInt(Protocol::Server::ReadTaskRequest, *out); + + out->finishChunk(); out->next(); } @@ -1222,6 +1271,8 @@ void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRanges { writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnouncement, *out); announcement.serialize(*out); + + out->finishChunk(); out->next(); } @@ -1230,6 +1281,8 @@ void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest re { writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out); request.serialize(*out); + + out->finishChunk(); out->next(); } @@ -1238,6 +1291,8 @@ void TCPHandler::sendProfileInfo(const ProfileInfo & info) { writeVarUInt(Protocol::Server::ProfileInfo, *out); info.write(*out, client_tcp_protocol_version); + + out->finishChunk(); out->next(); } @@ -1253,6 +1308,8 @@ void TCPHandler::sendTotals(const Block & totals) state.block_out->write(totals); state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } } @@ -1269,6 +1326,8 @@ void TCPHandler::sendExtremes(const Block & extremes) state.block_out->write(extremes); state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } } @@ -1286,6 +1345,8 @@ void TCPHandler::sendProfileEvents() writeStringBinary("", *out); state.profile_events_block_out->write(block); + + out->finishChunk(); out->next(); auto elapsed_milliseconds = stopwatch.elapsedMilliseconds(); @@ -1323,6 +1384,8 @@ void TCPHandler::sendTimezone() LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); writeStringBinary(tz, *out); + + out->finishChunk(); out->next(); } @@ -1583,6 +1646,12 @@ void TCPHandler::receiveAddendum() if (!is_interserver_mode) session->setQuotaClientKey(quota_key); + + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_cl, *in); + readStringBinary(proto_recv_chunked_cl, *in); + } } @@ -1616,6 +1685,11 @@ void TCPHandler::sendHello() writeStringBinary(server_display_name, *out); if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) writeVarUInt(VERSION_PATCH, *out); + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(server.config().getString("proto_caps.send", "notchunked"), *out); + writeStringBinary(server.config().getString("proto_caps.recv", "notchunked"), *out); + } if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { auto rules = server.context()->getAccessControl().getPasswordComplexityRules(); @@ -1668,6 +1742,7 @@ bool TCPHandler::receivePacket() case Protocol::Client::Ping: writeVarUInt(Protocol::Server::Pong, *out); + out->finishChunk(); out->next(); return false; @@ -2197,7 +2272,7 @@ QueryState::CancellationStatus TCPHandler::getQueryCancellationStatus() after_check_cancelled.restart(); /// During request execution the only packet that can come from the client is stopping the query. 
- if (static_cast(*in).poll(0))
+ if (in->poll(0))
 {
 if (in->eof())
 {
@@ -2248,19 +2323,33 @@ void TCPHandler::sendData(const Block & block)
 }
 writeVarUInt(Protocol::Server::Data, *out);
- /// Send external table name (empty name is the main table)
- writeStringBinary("", *out);
 /// For testing hedged requests
 if (block.rows() > 0 && query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds())
 {
+ /// This strange sequence is needed when the chunked protocol is enabled, so that the client does not
+ /// hang while waiting for at least the packet type: a chunk is not processed until either a chunk footer
+ /// or a chunk continuation header is received. The first 'next' sends the starting chunk containing the packet type,
+ /// and the second 'next' sends the chunk continuation header.
+ out->next();
+ /// Send external table name (empty name is the main table)
+ writeStringBinary("", *out);
 out->next();
 std::chrono::milliseconds ms(query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds());
 std::this_thread::sleep_for(ms);
 }
+ else
+ {
+ /// Send external table name (empty name is the main table)
+ writeStringBinary("", *out);
+ }
 state.block_out->write(block);
- state.maybe_compressed_out->next();
+
+ if (state.maybe_compressed_out != out)
+ state.maybe_compressed_out->next();
+
+ out->finishChunk();
 out->next();
 }
 catch (...)
@@ -2296,6 +2385,8 @@ void TCPHandler::sendLogData(const Block & block)
 writeStringBinary("", *out);
 state.logs_block_out->write(block);
+
+ out->finishChunk();
 out->next();
 }
@@ -2307,6 +2398,7 @@ void TCPHandler::sendTableColumns(const ColumnsDescription & columns)
 writeStringBinary("", *out);
 writeStringBinary(columns.toString(), *out);
+ out->finishChunk();
 out->next();
 }
@@ -2316,6 +2408,8 @@ void TCPHandler::sendException(const Exception & e, bool with_stack_trace)
 writeVarUInt(Protocol::Server::Exception, *out);
 writeException(e, *out, with_stack_trace);
+
+ out->finishChunk();
 out->next();
 }
@@ -2326,6 +2420,8 @@ void TCPHandler::sendEndOfStream()
 state.io.setAllDataSent();
 writeVarUInt(Protocol::Server::EndOfStream, *out);
+
+ out->finishChunk();
 out->next();
 }
@@ -2344,6 +2440,8 @@ void TCPHandler::sendProgress()
 increment.elapsed_ns = current_elapsed_ns - state.prev_elapsed_ns;
 state.prev_elapsed_ns = current_elapsed_ns;
 increment.write(*out, client_tcp_protocol_version);
+
+ out->finishChunk();
 out->next();
 }
diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h
index 74afb5a14a5..dca40e98920 100644
--- a/src/Server/TCPHandler.h
+++ b/src/Server/TCPHandler.h
@@ -18,6 +18,8 @@
 #include
 #include
 #include
+#include
+#include
 #include "Core/Types.h"
 #include "IServer.h"
@@ -186,6 +188,8 @@ private:
 UInt64 client_version_minor = 0;
 UInt64 client_version_patch = 0;
 UInt32 client_tcp_protocol_version = 0;
+ String proto_send_chunked_cl = "notchunked";
+ String proto_recv_chunked_cl = "notchunked";
 String quota_key;
 /// Connection settings, which are extracted from a context.
@@ -204,8 +208,8 @@ private:
 ClientInfo::QueryKind query_kind = ClientInfo::QueryKind::NO_QUERY;
 /// Streams for reading/writing from/to client connection socket.
- std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; ProfileEvents::Event read_event; ProfileEvents::Event write_event; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index d5780e32db3..8fbd6cbd29d 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -1403,14 +1404,22 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const const GetColumnsOptions options(GetColumnsOptions::All); const auto old_data_type = all_columns.getColumn(options, column_name).type; - bool new_type_has_object = command.data_type->hasDynamicSubcolumnsDeprecated(); - bool old_type_has_object = old_data_type->hasDynamicSubcolumnsDeprecated(); + bool new_type_has_deprecated_object = command.data_type->hasDynamicSubcolumnsDeprecated(); + bool old_type_has_deprecated_object = old_data_type->hasDynamicSubcolumnsDeprecated(); - if (new_type_has_object || old_type_has_object) + if (new_type_has_deprecated_object || old_type_has_deprecated_object) throw Exception( ErrorCodes::BAD_ARGUMENTS, "The change of data type {} of column {} to {} is not allowed. It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); + + bool has_object_type = isObject(command.data_type); + command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); + if (has_object_type) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "The change of data type {} of column {} to {} is not supported.", + old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index fdb4cfcb371..7616b384860 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -273,6 +273,8 @@ ConnectionPoolWithFailoverPtr DistributedAsyncInsertDirectoryQueue::createPool(c address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Storages/ExternalDataSourceConfiguration.cpp b/src/Storages/ExternalDataSourceConfiguration.cpp deleted file mode 100644 index 41979f8d91c..00000000000 --- a/src/Storages/ExternalDataSourceConfiguration.cpp +++ /dev/null @@ -1,288 +0,0 @@ -#include "ExternalDataSourceConfiguration.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - -IMPLEMENT_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS) - -static const std::unordered_set dictionary_allowed_keys = { - "host", "port", "user", "password", "quota_key", "db", - "database", "table", "schema", "replica", - "update_field", "update_lag", "invalidate_query", "query", - "where", "name", "secure", "uri", "collection"}; - - -template -SettingsChanges getSettingsChangesFromConfig( - const BaseSettings & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) -{ - SettingsChanges config_settings; - for (const auto & setting : settings.all()) - { - const auto & setting_name = setting.getName(); - auto setting_value = config.getString(config_prefix + '.' 
+ setting_name, ""); - if (!setting_value.empty()) - config_settings.emplace_back(setting_name, setting_value); - } - return config_settings; -} - - -String ExternalDataSourceConfiguration::toString() const -{ - WriteBufferFromOwnString configuration_info; - configuration_info << "username: " << username << "\t"; - if (addresses.empty()) - { - configuration_info << "host: " << host << "\t"; - configuration_info << "port: " << port << "\t"; - } - else - { - for (const auto & [replica_host, replica_port] : addresses) - { - configuration_info << "host: " << replica_host << "\t"; - configuration_info << "port: " << replica_port << "\t"; - } - } - return configuration_info.str(); -} - - -void ExternalDataSourceConfiguration::set(const ExternalDataSourceConfiguration & conf) -{ - host = conf.host; - port = conf.port; - username = conf.username; - password = conf.password; - quota_key = conf.quota_key; - database = conf.database; - table = conf.table; - schema = conf.schema; - addresses = conf.addresses; - addresses_expr = conf.addresses_expr; -} - - -static void validateConfigKeys( - const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix, HasConfigKeyFunc has_config_key_func) -{ - Poco::Util::AbstractConfiguration::Keys config_keys; - dict_config.keys(config_prefix, config_keys); - for (const auto & config_key : config_keys) - { - if (!has_config_key_func(config_key)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected key `{}` in dictionary source configuration", config_key); - } -} - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings) -{ - validateConfigKeys(dict_config, dict_config_prefix, has_config_key); - ExternalDataSourceConfiguration configuration; - - auto collection_name = dict_config.getString(dict_config_prefix + ".name", ""); - if (!collection_name.empty()) - { - const auto & config = context->getConfigRef(); - const auto & collection_prefix = fmt::format("named_collections.{}", collection_name); - validateConfigKeys(dict_config, collection_prefix, has_config_key); - auto config_settings = getSettingsChangesFromConfig(settings, config, collection_prefix); - auto dict_settings = getSettingsChangesFromConfig(settings, dict_config, dict_config_prefix); - /// dictionary config settings override collection settings. 
- config_settings.insert(config_settings.end(), dict_settings.begin(), dict_settings.end()); - - if (!config.has(collection_prefix)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name); - - configuration.host = dict_config.getString(dict_config_prefix + ".host", config.getString(collection_prefix + ".host", "")); - configuration.port = dict_config.getInt(dict_config_prefix + ".port", config.getUInt(collection_prefix + ".port", 0)); - configuration.username = dict_config.getString(dict_config_prefix + ".user", config.getString(collection_prefix + ".user", "")); - configuration.password = dict_config.getString(dict_config_prefix + ".password", config.getString(collection_prefix + ".password", "")); - configuration.quota_key = dict_config.getString(dict_config_prefix + ".quota_key", config.getString(collection_prefix + ".quota_key", "")); - configuration.database = dict_config.getString(dict_config_prefix + ".db", config.getString(dict_config_prefix + ".database", - config.getString(collection_prefix + ".db", config.getString(collection_prefix + ".database", "")))); - configuration.table = dict_config.getString(dict_config_prefix + ".table", config.getString(collection_prefix + ".table", "")); - configuration.schema = dict_config.getString(dict_config_prefix + ".schema", config.getString(collection_prefix + ".schema", "")); - - if (configuration.host.empty() || configuration.port == 0 || configuration.username.empty() || configuration.table.empty()) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Named collection of connection parameters is missing some " - "of the parameters and dictionary parameters are not added"); - } - return ExternalDataSourceInfo{.configuration = configuration, .settings_changes = config_settings}; - } - return std::nullopt; -} - -std::optional getURLBasedDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context) -{ - URLBasedDataSourceConfiguration configuration; - auto collection_name = dict_config.getString(dict_config_prefix + ".name", ""); - if (!collection_name.empty()) - { - const auto & config = context->getConfigRef(); - const auto & collection_prefix = fmt::format("named_collections.{}", collection_name); - - if (!config.has(collection_prefix)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name); - - configuration.url = - dict_config.getString(dict_config_prefix + ".url", config.getString(collection_prefix + ".url", "")); - configuration.endpoint = - dict_config.getString(dict_config_prefix + ".endpoint", config.getString(collection_prefix + ".endpoint", "")); - configuration.format = - dict_config.getString(dict_config_prefix + ".format", config.getString(collection_prefix + ".format", "")); - configuration.compression_method = - dict_config.getString(dict_config_prefix + ".compression", config.getString(collection_prefix + ".compression_method", "")); - configuration.structure = - dict_config.getString(dict_config_prefix + ".structure", config.getString(collection_prefix + ".structure", "")); - configuration.user = - dict_config.getString(dict_config_prefix + ".credentials.user", config.getString(collection_prefix + ".credentials.user", "")); - configuration.password = - dict_config.getString(dict_config_prefix + ".credentials.password", config.getString(collection_prefix + ".credentials.password", "")); - - String headers_prefix; - const 
Poco::Util::AbstractConfiguration *headers_config = nullptr; - if (dict_config.has(dict_config_prefix + ".headers")) - { - headers_prefix = dict_config_prefix + ".headers"; - headers_config = &dict_config; - } - else - { - headers_prefix = collection_prefix + ".headers"; - headers_config = &config; - } - - if (headers_config) - { - Poco::Util::AbstractConfiguration::Keys header_keys; - headers_config->keys(headers_prefix, header_keys); - headers_prefix += "."; - for (const auto & header : header_keys) - { - const auto header_prefix = headers_prefix + header; - configuration.headers.emplace_back( - headers_config->getString(header_prefix + ".name"), - headers_config->getString(header_prefix + ".value")); - } - } - - return URLBasedDataSourceConfig{ .configuration = configuration }; - } - - return std::nullopt; -} - -ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key) -{ - validateConfigKeys(dict_config, dict_config_prefix, has_config_key); - ExternalDataSourceConfiguration common_configuration; - - auto named_collection = getExternalDataSourceConfiguration(dict_config, dict_config_prefix, context, has_config_key); - if (named_collection) - { - common_configuration = named_collection->configuration; - } - else - { - common_configuration.host = dict_config.getString(dict_config_prefix + ".host", ""); - common_configuration.port = dict_config.getUInt(dict_config_prefix + ".port", 0); - common_configuration.username = dict_config.getString(dict_config_prefix + ".user", ""); - common_configuration.password = dict_config.getString(dict_config_prefix + ".password", ""); - common_configuration.quota_key = dict_config.getString(dict_config_prefix + ".quota_key", ""); - common_configuration.database = dict_config.getString(dict_config_prefix + ".db", dict_config.getString(dict_config_prefix + ".database", "")); - common_configuration.table = dict_config.getString(fmt::format("{}.table", dict_config_prefix), ""); - common_configuration.schema = dict_config.getString(fmt::format("{}.schema", dict_config_prefix), ""); - } - - ExternalDataSourcesByPriority configuration - { - .database = common_configuration.database, - .table = common_configuration.table, - .schema = common_configuration.schema, - .replicas_configurations = {} - }; - - if (dict_config.has(dict_config_prefix + ".replica")) - { - Poco::Util::AbstractConfiguration::Keys config_keys; - dict_config.keys(dict_config_prefix, config_keys); - - for (const auto & config_key : config_keys) - { - if (config_key.starts_with("replica")) - { - ExternalDataSourceConfiguration replica_configuration(common_configuration); - String replica_name = dict_config_prefix + "." 
+ config_key; - validateConfigKeys(dict_config, replica_name, has_config_key); - - size_t priority = dict_config.getInt(replica_name + ".priority", 0); - replica_configuration.host = dict_config.getString(replica_name + ".host", common_configuration.host); - replica_configuration.port = dict_config.getUInt(replica_name + ".port", common_configuration.port); - replica_configuration.username = dict_config.getString(replica_name + ".user", common_configuration.username); - replica_configuration.password = dict_config.getString(replica_name + ".password", common_configuration.password); - replica_configuration.quota_key = dict_config.getString(replica_name + ".quota_key", common_configuration.quota_key); - - if (replica_configuration.host.empty() || replica_configuration.port == 0 - || replica_configuration.username.empty() || replica_configuration.password.empty()) - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Named collection of connection parameters is missing some " - "of the parameters and no other dictionary parameters are added"); - } - - configuration.replicas_configurations[priority].emplace_back(replica_configuration); - } - } - } - else - { - configuration.replicas_configurations[0].emplace_back(common_configuration); - } - - return configuration; -} - - -void URLBasedDataSourceConfiguration::set(const URLBasedDataSourceConfiguration & conf) -{ - url = conf.url; - format = conf.format; - compression_method = conf.compression_method; - structure = conf.structure; - http_method = conf.http_method; - headers = conf.headers; -} - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings); - -template -SettingsChanges getSettingsChangesFromConfig( - const BaseSettings & settings, const Poco::Util::AbstractConfiguration & config, const String & config_prefix); - -} diff --git a/src/Storages/ExternalDataSourceConfiguration.h b/src/Storages/ExternalDataSourceConfiguration.h deleted file mode 100644 index c703c9ce999..00000000000 --- a/src/Storages/ExternalDataSourceConfiguration.h +++ /dev/null @@ -1,92 +0,0 @@ -#pragma once - -#include -#include -#include -#include - - -namespace DB -{ - -#define EMPTY_SETTINGS(M, ALIAS) -DECLARE_SETTINGS_TRAITS(EmptySettingsTraits, EMPTY_SETTINGS) - -struct EmptySettings : public BaseSettings {}; - -struct ExternalDataSourceConfiguration -{ - String host; - UInt16 port = 0; - String username = "default"; - String password; - String quota_key; - String database; - String table; - String schema; - - std::vector> addresses; /// Failover replicas. - String addresses_expr; - - String toString() const; - - void set(const ExternalDataSourceConfiguration & conf); -}; - - -using StorageSpecificArgs = std::vector>; - -struct ExternalDataSourceInfo -{ - ExternalDataSourceConfiguration configuration; - SettingsChanges settings_changes; -}; - -using HasConfigKeyFunc = std::function; - -template -std::optional getExternalDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, - ContextPtr context, HasConfigKeyFunc has_config_key, const BaseSettings & settings = {}); - - -/// Highest priority is 0, the bigger the number in map, the less the priority. 
-using ExternalDataSourcesConfigurationByPriority = std::map>; - -struct ExternalDataSourcesByPriority -{ - String database; - String table; - String schema; - ExternalDataSourcesConfigurationByPriority replicas_configurations; -}; - -ExternalDataSourcesByPriority -getExternalDataSourceConfigurationByPriority(const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key); - -struct URLBasedDataSourceConfiguration -{ - String url; - String endpoint; - String format = "auto"; - String compression_method = "auto"; - String structure = "auto"; - - String user; - String password; - - HTTPHeaderEntries headers; - String http_method; - - void set(const URLBasedDataSourceConfiguration & conf); -}; - -struct URLBasedDataSourceConfig -{ - URLBasedDataSourceConfiguration configuration; -}; - -std::optional getURLBasedDataSourceConfiguration( - const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context); - -} diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h index 0477a08b0d2..6de7e60285f 100644 --- a/src/Storages/IStorage.h +++ b/src/Storages/IStorage.h @@ -103,7 +103,7 @@ public: IStorage(const IStorage &) = delete; IStorage & operator=(const IStorage &) = delete; - /// The main name of the table type (for example, StorageMergeTree). + /// The main name of the table type (e.g. Memory, MergeTree, CollapsingMergeTree). virtual std::string getName() const = 0; /// The name of the table. diff --git a/src/Storages/MaterializedView/RefreshSet.h b/src/Storages/MaterializedView/RefreshSet.h index 7fb583fd316..6141a69996a 100644 --- a/src/Storages/MaterializedView/RefreshSet.h +++ b/src/Storages/MaterializedView/RefreshSet.h @@ -10,7 +10,7 @@ namespace DB { -enum class RefreshState : RefreshTaskStateUnderlying +enum class RefreshState { Disabled = 0, Scheduled, @@ -18,11 +18,11 @@ enum class RefreshState : RefreshTaskStateUnderlying Running, }; -enum class LastRefreshResult : RefreshTaskStateUnderlying +enum class LastRefreshResult { Unknown = 0, Cancelled, - Exception, + Error, Finished }; @@ -36,7 +36,8 @@ struct RefreshInfo UInt64 last_attempt_duration_ms = 0; UInt32 next_refresh_time = 0; UInt64 refresh_count = 0; - String exception_message; // if last_refresh_result is Exception + UInt64 retry = 0; + String exception_message; // if last_refresh_result is Error std::vector remaining_dependencies; ProgressValues progress; }; diff --git a/src/Storages/MaterializedView/RefreshSettings.h b/src/Storages/MaterializedView/RefreshSettings.h index 814c7e52b32..23676538788 100644 --- a/src/Storages/MaterializedView/RefreshSettings.h +++ b/src/Storages/MaterializedView/RefreshSettings.h @@ -6,8 +6,10 @@ namespace DB { #define LIST_OF_REFRESH_SETTINGS(M, ALIAS) \ - /// TODO: Add settings - /// M(UInt64, name, 42, "...", 0) + M(Int64, refresh_retries, 0, "How many times to retry refresh query if it fails. If all attempts fail, wait for the next refresh time according to schedule. 0 to disable retries. -1 for infinite retries.", 0) \ + M(UInt64, refresh_retry_initial_backoff_ms, 100, "Delay before the first retry if refresh query fails (if refresh_retries setting is not zero). 
Each subsequent retry doubles the delay, up to refresh_retry_max_backoff_ms.", 0) \ + M(UInt64, refresh_retry_max_backoff_ms, 60'000, "Limit on the exponential growth of delay between refresh attempts, if they keep failing and refresh_retries is positive.", 0) \ + DECLARE_SETTINGS_TRAITS(RefreshSettingsTraits, LIST_OF_REFRESH_SETTINGS) diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index 0837eaf97fd..ed5a6652288 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -1,7 +1,5 @@ #include -#include - #include #include #include @@ -11,6 +9,7 @@ #include #include #include +#include namespace CurrentMetrics { @@ -24,37 +23,39 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int QUERY_WAS_CANCELLED; + extern const int REFRESH_FAILED; } RefreshTask::RefreshTask( - const ASTRefreshStrategy & strategy) + StorageMaterializedView * view_, const DB::ASTRefreshStrategy & strategy) : log(getLogger("RefreshTask")) + , view(view_) , refresh_schedule(strategy) -{} + , refresh_append(strategy.append) +{ + if (strategy.settings != nullptr) + refresh_settings.applyChanges(strategy.settings->changes); +} -RefreshTaskHolder RefreshTask::create( +OwnedRefreshTask RefreshTask::create( + StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) { - auto task = std::make_shared(strategy); + auto task = std::make_shared(view, strategy); - task->refresh_task = context->getSchedulePool().createTask("MaterializedViewRefresherTask", - [self = task->weak_from_this()] - { - if (auto t = self.lock()) - t->refreshTask(); - }); + task->refresh_task = context->getSchedulePool().createTask("RefreshTask", + [self = task.get()] { self->refreshTask(); }); if (strategy.dependencies) for (auto && dependency : strategy.dependencies->children) task->initial_dependencies.emplace_back(dependency->as()); - return task; + return OwnedRefreshTask(task); } -void RefreshTask::initializeAndStart(std::shared_ptr view) +void RefreshTask::initializeAndStart() { - view_to_refresh = view; if (view->getContext()->getSettingsRef().stop_refreshable_materialized_views_on_startup) stop_requested = true; view->getContext()->getRefreshSet().emplace(view->getStorageID(), initial_dependencies, shared_from_this()); @@ -102,7 +103,11 @@ void RefreshTask::alterRefreshParams(const DB::ASTRefreshStrategy & new_strategy if (arriveDependency(id) && !std::exchange(refresh_immediately, true)) refresh_task->schedule(); - /// TODO: Update settings once we have them. 
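To make the three retry settings above concrete, here is an editor's sketch, not code from the patch, and retry_delay_ms is an invented name: the delay before retry N is the initial backoff doubled N-1 times and clamped to the maximum, so with the defaults (100 ms initial, 60000 ms cap) the delays are 100, 200, 400, ..., 51200, 60000, 60000, ... ms. It uses the same overflow-safe "a * b <= c iff a <= c / b" check that RefreshTask::scheduleRetryOrSkipToNextRefresh applies later in this patch.

#include <algorithm>
#include <cstdint>

/// Illustrative only; attempt is 1-based.
static uint64_t retry_delay_ms(uint64_t attempt, uint64_t initial_ms = 100, uint64_t max_ms = 60000)
{
    uint64_t multiplier = uint64_t(1) << std::min<uint64_t>(attempt - 1, 62);  /// cap the shift to avoid overflow
    return initial_ms <= max_ms / multiplier ? initial_ms * multiplier : max_ms;
}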
+ refresh_settings = {}; + if (new_strategy.settings != nullptr) + refresh_settings.applyChanges(new_strategy.settings->changes); + + refresh_append = new_strategy.append; } RefreshInfo RefreshTask::getInfo() const @@ -111,7 +116,7 @@ RefreshInfo RefreshTask::getInfo() const auto res = info; res.view_id = set_handle.getID(); res.remaining_dependencies.assign(remaining_dependencies.begin(), remaining_dependencies.end()); - if (res.last_refresh_result != LastRefreshResult::Exception) + if (res.last_refresh_result != LastRefreshResult::Error) res.exception_message.clear(); res.progress = progress.getValues(); return res; @@ -139,6 +144,8 @@ void RefreshTask::run() std::lock_guard guard(mutex); if (std::exchange(refresh_immediately, true)) return; + next_refresh_prescribed = std::chrono::floor(currentTime()); + next_refresh_actual = currentTime(); refresh_task->schedule(); } @@ -149,10 +156,22 @@ void RefreshTask::cancel() refresh_task->schedule(); } +void RefreshTask::wait() +{ + std::unique_lock lock(mutex); + refresh_cv.wait(lock, [&] { return info.state != RefreshState::Running && !refresh_immediately; }); + if (info.last_refresh_result == LastRefreshResult::Error) + throw Exception(ErrorCodes::REFRESH_FAILED, "Refresh failed: {}", info.exception_message); +} + void RefreshTask::shutdown() { { std::lock_guard guard(mutex); + + if (view == nullptr) + return; // already shut down + stop_requested = true; interruptExecution(); } @@ -166,6 +185,8 @@ void RefreshTask::shutdown() /// (Also, RefreshSet holds a shared_ptr to us.) std::lock_guard guard(mutex); set_handle.reset(); + + view = nullptr; } void RefreshTask::notify(const StorageID & parent_id, std::chrono::sys_seconds parent_next_prescribed_time) @@ -232,6 +253,7 @@ void RefreshTask::refreshTask() chassert(lock.owns_lock()); interrupt_execution.store(false); + refresh_cv.notify_all(); // we'll assign info.state before unlocking the mutex if (stop_requested) { @@ -243,7 +265,7 @@ void RefreshTask::refreshTask() if (!refresh_immediately) { auto now = currentTime(); - if (now >= next_refresh_with_spread) + if (now >= next_refresh_actual) { if (arriveTime()) refresh_immediately = true; @@ -256,7 +278,7 @@ void RefreshTask::refreshTask() else { size_t delay_ms = std::chrono::duration_cast( - next_refresh_with_spread - now).count(); + next_refresh_actual - now).count(); /// If we're in a test that fakes the clock, poll every 100ms. if (fake_clock.load(std::memory_order_relaxed) != INT64_MIN) @@ -270,19 +292,9 @@ void RefreshTask::refreshTask() /// Perform a refresh. + bool append = refresh_append; refresh_immediately = false; - - auto view = lockView(); - if (!view) - { - /// The view was dropped. This RefreshTask should be destroyed soon too. - /// (Maybe this is unreachable.) - info.state = RefreshState::Disabled; - break; - } - info.state = RefreshState::Running; - CurrentMetrics::Increment metric_inc(CurrentMetrics::RefreshingViews); lock.unlock(); @@ -293,19 +305,13 @@ void RefreshTask::refreshTask() try { - executeRefreshUnlocked(view); + executeRefreshUnlocked(append); refreshed = true; } catch (...) 
{ if (!interrupt_execution.load()) - { - PreformattedMessage message = getCurrentExceptionMessageAndPattern(true); - auto text = message.text; - message.text = fmt::format("Refresh view {} failed: {}", view->getStorageID().getFullTableName(), message.text); - LOG_ERROR(log, message); - exception = text; - } + exception = getCurrentExceptionMessage(true); } lock.lock(); @@ -317,18 +323,18 @@ void RefreshTask::refreshTask() if (exception) { - info.last_refresh_result = LastRefreshResult::Exception; + info.last_refresh_result = LastRefreshResult::Error; info.exception_message = *exception; - - /// TODO: Do a few retries with exponential backoff. - advanceNextRefreshTime(now); + Int64 attempt_number = num_retries + 1; + scheduleRetryOrSkipToNextRefresh(now); + LOG_ERROR(log, "Refresh view {} failed (attempt {}/{}): {}", view->getStorageID().getFullTableName(), attempt_number, refresh_settings.refresh_retries + 1, *exception); } else if (!refreshed) { info.last_refresh_result = LastRefreshResult::Cancelled; /// Make sure we don't just start another refresh immediately. - if (!stop_requested && now >= next_refresh_with_spread) + if (!stop_requested) advanceNextRefreshTime(now); } else @@ -361,17 +367,18 @@ void RefreshTask::refreshTask() } } -void RefreshTask::executeRefreshUnlocked(std::shared_ptr view) +void RefreshTask::executeRefreshUnlocked(bool append) { LOG_DEBUG(log, "Refreshing view {}", view->getStorageID().getFullTableName()); progress.reset(); - /// Create a table. - auto [refresh_context, refresh_query] = view->prepareRefresh(); - - StorageID stale_table = StorageID::createEmpty(); + ContextMutablePtr refresh_context = view->createRefreshContext(); + std::optional table_to_drop; try { + /// Create a table. + auto refresh_query = view->prepareRefresh(append, refresh_context, table_to_drop); + /// Run the query. { CurrentThread::QueryScope query_scope(refresh_context); // create a thread group for the query @@ -429,37 +436,55 @@ void RefreshTask::executeRefreshUnlocked(std::shared_ptrexchangeTargetTable(refresh_query->table_id, refresh_context); + if (!append) + table_to_drop = view->exchangeTargetTable(refresh_query->table_id, refresh_context); } catch (...) { - try - { - InterpreterDropQuery::executeDropQuery( - ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, refresh_query->table_id, /*sync*/ false, /*ignore_sync_setting*/ true); - } - catch (...) - { - tryLogCurrentException(log, "Failed to drop temporary table after a failed refresh"); - /// Let's ignore this and keep going, at risk of accumulating many trash tables if this keeps happening. - } + if (table_to_drop.has_value()) + view->dropTempTable(table_to_drop.value(), refresh_context); throw; } - /// Drop the old table (outside the try-catch so we don't try to drop the other table if this fails). 
- InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, view->getContext(), refresh_context, stale_table, /*sync*/ true, /*ignore_sync_setting*/ true); + if (table_to_drop.has_value()) + view->dropTempTable(table_to_drop.value(), refresh_context); } void RefreshTask::advanceNextRefreshTime(std::chrono::system_clock::time_point now) { std::chrono::sys_seconds next = refresh_schedule.prescribeNext(next_refresh_prescribed, now); next_refresh_prescribed = next; - next_refresh_with_spread = refresh_schedule.addRandomSpread(next); + next_refresh_actual = refresh_schedule.addRandomSpread(next); - auto secs = std::chrono::floor(next_refresh_with_spread); + num_retries = 0; + info.retry = num_retries; + + auto secs = std::chrono::floor(next_refresh_actual); info.next_refresh_time = UInt32(secs.time_since_epoch().count()); } +void RefreshTask::scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now) +{ + if (refresh_settings.refresh_retries >= 0 && num_retries >= refresh_settings.refresh_retries) + { + advanceNextRefreshTime(now); + return; + } + + num_retries += 1; + info.retry = num_retries; + + UInt64 delay_ms; + UInt64 multiplier = UInt64(1) << std::min(num_retries - 1, Int64(62)); + /// Overflow check: a*b <= c iff a <= c/b iff a <= floor(c/b). + if (refresh_settings.refresh_retry_initial_backoff_ms <= refresh_settings.refresh_retry_max_backoff_ms / multiplier) + delay_ms = refresh_settings.refresh_retry_initial_backoff_ms * multiplier; + else + delay_ms = refresh_settings.refresh_retry_max_backoff_ms; + + next_refresh_actual = now + std::chrono::milliseconds(delay_ms); +} + bool RefreshTask::arriveDependency(const StorageID & parent) { remaining_dependencies.erase(parent); @@ -500,11 +525,6 @@ void RefreshTask::interruptExecution() } } -std::shared_ptr RefreshTask::lockView() -{ - return std::static_pointer_cast(view_to_refresh.lock()); -} - std::chrono::system_clock::time_point RefreshTask::currentTime() const { Int64 fake = fake_clock.load(std::memory_order::relaxed); diff --git a/src/Storages/MaterializedView/RefreshTask.h b/src/Storages/MaterializedView/RefreshTask.h index 623493f6aec..ad9d949e18e 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -17,19 +17,21 @@ class PipelineExecutor; class StorageMaterializedView; class ASTRefreshStrategy; +struct OwnedRefreshTask; class RefreshTask : public std::enable_shared_from_this { public: /// Never call it manually, public for shared_ptr construction only - explicit RefreshTask(const ASTRefreshStrategy & strategy); + RefreshTask(StorageMaterializedView * view_, const ASTRefreshStrategy & strategy); /// The only proper way to construct task - static RefreshTaskHolder create( + static OwnedRefreshTask create( + StorageMaterializedView * view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); - void initializeAndStart(std::shared_ptr view); + void initializeAndStart(); // called at most once /// Call when renaming the materialized view. void rename(StorageID new_id); @@ -51,7 +53,14 @@ public: /// Cancel task execution void cancel(); + /// Waits for the currently running refresh attempt to complete. + /// If the refresh fails, throws an exception. + /// If no refresh is running, completes immediately, throwing an exception if previous refresh failed. + void wait(); + /// Permanently disable task scheduling and remove this table from RefreshSet. + /// Ok to call multiple times, but not in parallel. 
+ /// Ok to call even if initializeAndStart() wasn't called or failed. void shutdown(); /// Notify dependent task @@ -65,7 +74,7 @@ public: private: LoggerPtr log = nullptr; - std::weak_ptr view_to_refresh; + StorageMaterializedView * view; /// Protects interrupt_execution and running_executor. /// Can be locked while holding `mutex`. @@ -82,8 +91,10 @@ private: mutable std::mutex mutex; RefreshSchedule refresh_schedule; - RefreshSettings refresh_settings; // TODO: populate, use, update on alter + RefreshSettings refresh_settings; std::vector initial_dependencies; + bool refresh_append; + RefreshSet::Handle set_handle; /// StorageIDs of our dependencies that we're waiting for. @@ -112,7 +123,8 @@ private: /// E.g. for REFRESH EVERY 1 DAY, yesterday's refresh of the dependency shouldn't trigger today's /// refresh of the dependent even if it happened today (e.g. it was slow or had random spread > 1 day). std::chrono::sys_seconds next_refresh_prescribed; - std::chrono::system_clock::time_point next_refresh_with_spread; + std::chrono::system_clock::time_point next_refresh_actual; + Int64 num_retries = 0; /// Calls refreshTask() from background thread. BackgroundSchedulePool::TaskHolder refresh_task; @@ -123,6 +135,7 @@ private: /// Just for observability. RefreshInfo info; Progress progress; + std::condition_variable refresh_cv; // notified when info.state changes /// The main loop of the refresh task. It examines the state, sees what needs to be /// done and does it. If there's nothing to do at the moment, returns; it's then scheduled again, @@ -134,11 +147,14 @@ private: /// Perform an actual refresh: create new table, run INSERT SELECT, exchange tables, drop old table. /// Mutex must be unlocked. Called only from refresh_task. - void executeRefreshUnlocked(std::shared_ptr view); + void executeRefreshUnlocked(bool append); /// Assigns next_refresh_* void advanceNextRefreshTime(std::chrono::system_clock::time_point now); + /// Either advances next_refresh_actual using exponential backoff or does advanceNextRefreshTime(). + void scheduleRetryOrSkipToNextRefresh(std::chrono::system_clock::time_point now); + /// Returns true if all dependencies are fulfilled now. Refills remaining_dependencies in this case. bool arriveDependency(const StorageID & parent); bool arriveTime(); @@ -146,9 +162,24 @@ private: void interruptExecution(); - std::shared_ptr lockView(); - std::chrono::system_clock::time_point currentTime() const; }; +/// Wrapper around shared_ptr, calls shutdown() in destructor. 
+struct OwnedRefreshTask
+{
+ RefreshTaskHolder ptr;
+
+ OwnedRefreshTask() = default;
+ explicit OwnedRefreshTask(RefreshTaskHolder p) : ptr(std::move(p)) {}
+ OwnedRefreshTask(OwnedRefreshTask &&) = default;
+ OwnedRefreshTask & operator=(OwnedRefreshTask &&) = default;
+
+ ~OwnedRefreshTask() { if (ptr) ptr->shutdown(); }
+
+ RefreshTask* operator->() const { return ptr.get(); }
+ RefreshTask& operator*() const { return *ptr; }
+ explicit operator bool() const { return ptr != nullptr; }
+};
+
 }
diff --git a/src/Storages/MaterializedView/RefreshTask_fwd.h b/src/Storages/MaterializedView/RefreshTask_fwd.h
index 9a0a122381e..ff17c839dc5 100644
--- a/src/Storages/MaterializedView/RefreshTask_fwd.h
+++ b/src/Storages/MaterializedView/RefreshTask_fwd.h
@@ -8,9 +8,7 @@ namespace DB
 class RefreshTask;
-using RefreshTaskStateUnderlying = UInt8;
 using RefreshTaskHolder = std::shared_ptr;
-using RefreshTaskObserver = std::weak_ptr;
 using RefreshTaskList = std::list;
 }
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index b6e4f0af696..195aa4fdc10 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -1662,11 +1662,9 @@ void IMergeTreeDataPart::loadColumns(bool require)
 }
-/// Project part / part with project parts / compact part doesn't support LWD.
 bool IMergeTreeDataPart::supportLightweightDeleteMutate() const
 {
- return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact) &&
- parent_part == nullptr && projection_parts.empty();
+ return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact);
 }
 bool IMergeTreeDataPart::hasLightweightDelete() const
diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp
index 2e57e172a7f..1ed096fae17 100644
--- a/src/Storages/MergeTree/KeyCondition.cpp
+++ b/src/Storages/MergeTree/KeyCondition.cpp
@@ -888,13 +888,22 @@ static Field applyFunctionForField(
 return (*col)[0];
 }
+/// applyFunction executes the function either on a single `field` or on the whole column that `field` refers to.
 static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr & current_type, const FieldRef & field)
 {
+ chassert(func != nullptr);
 /// Fallback for fields without block reference.
 if (field.isExplicit())
 return applyFunctionForField(func, current_type, field);
- String result_name = "_" + func->getName() + "_" + toString(field.column_idx);
+ /// We cache the function result inside `field.columns`, because this function is called many times
+ /// for fields of the same column. When the column is huge, for example when there are thousands of marks, the cache is essential.
+ /// The cache key looks like `_[function_pointer]_[param_column_id]`, identifying a unique (function, column) pair.
+ WriteBufferFromOwnString buf;
+ writeText("_", buf);
+ writePointerHex(func.get(), buf);
+ writeText("_" + toString(field.column_idx), buf);
+ String result_name = buf.str();
 const auto & columns = field.columns;
 size_t result_idx = columns->size();
@@ -906,6 +915,7 @@ static FieldRef applyFunction(const FunctionBasePtr & func, const DataTypePtr &
 if (result_idx == columns->size())
 {
+ /// On a cache miss, we calculate the result for the whole column the field comes from, which avoids repeated per-field work.
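As an aside, the cache-key scheme described in the comment above amounts to combining the function object's address with the index of the source column, so every field that refers to the same column reuses one computed result column. A sketch of the idea follows (editor's illustration, not the patch's code; make_cache_key is an invented name).

#include <cstdio>
#include <string>

/// The real code streams the pointer with writePointerHex into a WriteBufferFromOwnString; this only shows the shape of the key.
static std::string make_cache_key(const void * function, size_t column_idx)
{
    char buf[64];
    std::snprintf(buf, sizeof(buf), "_%p_%zu", function, column_idx);  /// e.g. "_0x7f3a1c0042d0_3"
    return buf;
}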
ColumnsWithTypeAndName args{(*columns)[field.column_idx]}; field.columns->emplace_back(ColumnWithTypeAndName {nullptr, func->getResultType(), result_name}); (*columns)[result_idx].column = func->execute(args, (*columns)[result_idx].type, columns->front().column->size()); diff --git a/src/Storages/MergeTree/MergeProgress.h b/src/Storages/MergeTree/MergeProgress.h index dd4922051b5..8562e81e761 100644 --- a/src/Storages/MergeTree/MergeProgress.h +++ b/src/Storages/MergeTree/MergeProgress.h @@ -8,10 +8,10 @@ namespace ProfileEvents { - extern const Event MergesTimeMilliseconds; extern const Event MergedUncompressedBytes; extern const Event MergedRows; - extern const Event Merge; + extern const Event MutatedRows; + extern const Event MutatedUncompressedBytes; } namespace DB @@ -63,18 +63,17 @@ public: void updateWatch() { UInt64 watch_curr_elapsed = merge_list_element_ptr->watch.elapsed(); - ProfileEvents::increment(ProfileEvents::MergesTimeMilliseconds, (watch_curr_elapsed - watch_prev_elapsed) / 1000000); watch_prev_elapsed = watch_curr_elapsed; } - void operator() (const Progress & value) + void operator()(const Progress & value) { - ProfileEvents::increment(ProfileEvents::MergedUncompressedBytes, value.read_bytes); - if (stage.is_first) - { - ProfileEvents::increment(ProfileEvents::MergedRows, value.read_rows); - ProfileEvents::increment(ProfileEvents::Merge); - } + if (merge_list_element_ptr->is_mutation) + updateProfileEvents(value, ProfileEvents::MutatedRows, ProfileEvents::MutatedUncompressedBytes); + else + updateProfileEvents(value, ProfileEvents::MergedRows, ProfileEvents::MergedUncompressedBytes); + + updateWatch(); merge_list_element_ptr->bytes_read_uncompressed += value.read_bytes; @@ -90,6 +89,14 @@ public: std::memory_order_relaxed); } } + +private: + void updateProfileEvents(const Progress & value, ProfileEvents::Event rows_event, ProfileEvents::Event bytes_event) const + { + ProfileEvents::increment(bytes_event, value.read_bytes); + if (stage.is_first) + ProfileEvents::increment(rows_event, value.read_rows); + } }; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index ce06adf110c..26cb821f33b 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,18 @@ #include #include +namespace ProfileEvents +{ + extern const Event Merge; + extern const Event MergedColumns; + extern const Event GatheredColumns; + extern const Event MergeTotalMilliseconds; + extern const Event MergeExecuteMilliseconds; + extern const Event MergeHorizontalStageExecuteMilliseconds; + extern const Event MergeVerticalStageExecuteMilliseconds; + extern const Event MergeProjectionStageExecuteMilliseconds; +} + namespace DB { @@ -169,6 +182,8 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() { + ProfileEvents::increment(ProfileEvents::Merge); + String local_tmp_prefix; if (global_ctx->need_prefix) { @@ -446,6 +461,13 @@ void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const Str MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::getContextForNextStage() { + /// Do not increment for projection stage because time is already accounted in main task. 
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeHorizontalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + auto new_ctx = std::make_shared(); new_ctx->rows_sources_write_buf = std::move(ctx->rows_sources_write_buf); @@ -463,8 +485,14 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNextStage() { - auto new_ctx = std::make_shared(); + /// Do not increment for projection stage because time is already accounted in main task. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeVerticalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + auto new_ctx = std::make_shared(); new_ctx->need_sync = std::move(ctx->need_sync); ctx.reset(); @@ -474,9 +502,14 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -486,11 +519,20 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() { - Block block; - if (!ctx->is_cancelled() && (global_ctx->merging_executor->pull(block))) - { - global_ctx->rows_written += block.rows(); + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = global_ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); + do + { + Block block; + + if (ctx->is_cancelled() || !global_ctx->merging_executor->pull(block)) + { + finalize(); + return false; + } + + global_ctx->rows_written += block.rows(); const_cast(*global_ctx->to).write(block); UInt64 result_rows = 0; @@ -510,11 +552,14 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() global_ctx->space_reservation->update(static_cast((1. 
- progress) * ctx->initial_reservation)); } + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } + /// Need execute again + return true; +} +void MergeTask::ExecuteAndFinalizeHorizontalPart::finalize() const +{ global_ctx->merging_executor.reset(); global_ctx->merged_pipeline.reset(); @@ -524,17 +569,13 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() if (ctx->need_remove_expired_values && global_ctx->ttl_merges_blocker->isCancelled()) throw Exception(ErrorCodes::ABORTED, "Cancelled merging parts with expired TTL"); - const auto data_settings = global_ctx->data->getSettings(); const size_t sum_compressed_bytes_upper_bound = global_ctx->merge_list_element_ptr->total_size_bytes_compressed; - ctx->need_sync = needSyncPart(ctx->sum_input_rows_upper_bound, sum_compressed_bytes_upper_bound, *data_settings); - - return false; + ctx->need_sync = needSyncPart(ctx->sum_input_rows_upper_bound, sum_compressed_bytes_upper_bound, *global_ctx->data->getSettings()); } - bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const { - /// No need to execute this part if it is horizontal merge. + /// No need to execute this part if it is horizontal merge. if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; @@ -708,17 +749,24 @@ void MergeTask::VerticalMergeStage::prepareVerticalMergeForOneColumn() const bool MergeTask::VerticalMergeStage::executeVerticalMergeForOneColumn() const { - Block block; - if (!global_ctx->merges_blocker->isCancelled() && !global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed) - && ctx->executor->pull(block)) + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = global_ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); + + do { + Block block; + + if (global_ctx->merges_blocker->isCancelled() + || global_ctx->merge_list_element_ptr->is_cancelled.load(std::memory_order_relaxed) + || !ctx->executor->pull(block)) + return false; + ctx->column_elems_written += block.rows(); ctx->column_to->write(block); + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } - return false; + /// Need execute again + return true; } @@ -784,6 +832,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c /// Print overall profiling info. NOTE: it may duplicates previous messages { + ProfileEvents::increment(ProfileEvents::MergedColumns, global_ctx->merging_columns.size()); + ProfileEvents::increment(ProfileEvents::GatheredColumns, global_ctx->gathering_columns.size()); + double elapsed_seconds = global_ctx->merge_list_element_ptr->watch.elapsedSeconds(); LOG_DEBUG(ctx->log, "Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.", @@ -906,12 +957,29 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const return false; } +MergeTask::StageRuntimeContextPtr MergeTask::MergeProjectionsStage::getContextForNextStage() +{ + /// Do not increment for projection stage because time is already accounted in main task. + /// The projection stage has its own empty projection stage which may add a drift of several milliseconds. 
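The executeImpl() and executeVerticalMergeForOneColumn() hunks above turn single-block steps into loops bounded by background_task_preferred_step_execution_time_ms. A minimal standalone sketch of that pattern, using std::chrono instead of ClickHouse's Stopwatch and a hypothetical pull_and_write_one_block callback:

#include <chrono>
#include <functional>

/// Keep processing blocks until either the source is exhausted (stage finished)
/// or the preferred step time is exceeded (ask the executor to schedule us again).
bool executeStep(const std::function<bool()> & pull_and_write_one_block, std::chrono::milliseconds preferred_step_time)
{
    const auto start = std::chrono::steady_clock::now();
    do
    {
        if (!pull_and_write_one_block())
            return false;   /// nothing left to read, the stage is finished
    } while (std::chrono::steady_clock::now() - start < preferred_step_time);
    return true;            /// time budget spent for this step, need to execute again
}
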
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeProjectionStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + + return nullptr; +} bool MergeTask::VerticalMergeStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -920,9 +988,14 @@ bool MergeTask::VerticalMergeStage::execute() bool MergeTask::MergeProjectionsStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -969,12 +1042,26 @@ bool MergeTask::VerticalMergeStage::executeVerticalMergeForAllColumns() const bool MergeTask::execute() { - assert(stages_iterator != stages.end()); - if ((*stages_iterator)->execute()) + chassert(stages_iterator != stages.end()); + const auto & current_stage = *stages_iterator; + + if (current_stage->execute()) return true; - /// Stage is finished, need initialize context for the next stage - auto next_stage_context = (*stages_iterator)->getContextForNextStage(); + /// Stage is finished, need to initialize context for the next stage and update profile events. + + UInt64 current_elapsed_ms = global_ctx->merge_list_element_ptr->watch.elapsedMilliseconds(); + UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapsed_ms; + global_ctx->prev_elapsed_ms = current_elapsed_ms; + + auto next_stage_context = current_stage->getContextForNextStage(); + + /// Do not increment for projection stage because time is already accounted in main task. 
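MergeTask::execute() above charges each finished stage with the wall-clock delta since the previous stage finished, in addition to the overall MergeTotalMilliseconds counter. A small sketch of that delta bookkeeping, with hypothetical names:

#include <cstdint>

struct StageTimeAccounter
{
    uint64_t prev_elapsed_ms = 0;

    /// Returns the milliseconds to attribute to the stage that has just finished,
    /// given the total elapsed time of the whole merge so far.
    uint64_t stageFinished(uint64_t total_elapsed_ms)
    {
        uint64_t stage_elapsed_ms = total_elapsed_ms - prev_elapsed_ms;
        prev_elapsed_ms = total_elapsed_ms;
        return stage_elapsed_ms;
    }
};
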
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); + } /// Move to the next stage in an array of stages ++stages_iterator; @@ -1099,7 +1186,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() /* limit_= */0, /* always_read_till_end_= */false, ctx->rows_sources_write_buf.get(), - true, ctx->blocks_are_granules_size); break; diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 8b0f2130e8e..9a68b2e04ac 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -26,6 +27,12 @@ #include #include +namespace ProfileEvents +{ + extern const Event MergeHorizontalStageTotalMilliseconds; + extern const Event MergeVerticalStageTotalMilliseconds; + extern const Event MergeProjectionStageTotalMilliseconds; +} namespace DB { @@ -134,6 +141,7 @@ private: { virtual void setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) = 0; virtual StageRuntimeContextPtr getContextForNextStage() = 0; + virtual ProfileEvents::Event getTotalTimeProfileEvent() const = 0; virtual bool execute() = 0; virtual ~IStage() = default; }; @@ -195,6 +203,7 @@ private: bool need_prefix; scope_guard temporary_directory_lock; + UInt64 prev_elapsed_ms{0}; }; using GlobalRuntimeContextPtr = std::shared_ptr; @@ -233,6 +242,7 @@ private: /// Dependencies for next stages std::list::const_iterator it_name_and_type; bool need_sync{false}; + UInt64 elapsed_execute_ns{0}; }; using ExecuteAndFinalizeHorizontalPartRuntimeContextPtr = std::shared_ptr; @@ -244,6 +254,7 @@ private: bool prepare(); bool executeImpl(); + void finalize() const; /// NOTE: Using pointer-to-member instead of std::function and lambda makes stacktraces much more concise and readable using ExecuteAndFinalizeHorizontalPartSubtasks = std::array; @@ -256,7 +267,6 @@ private: ExecuteAndFinalizeHorizontalPartSubtasks::const_iterator subtasks_iterator = subtasks.begin(); - MergeAlgorithm chooseMergeAlgorithm() const; void createMergedStream(); void extractMergingAndGatheringColumns() const; @@ -268,6 +278,7 @@ private: } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeHorizontalStageTotalMilliseconds; } ExecuteAndFinalizeHorizontalPartRuntimeContextPtr ctx; GlobalRuntimeContextPtr global_ctx; @@ -307,6 +318,7 @@ private: QueryPipeline column_parts_pipeline; std::unique_ptr executor; std::unique_ptr rows_sources_read_buf{nullptr}; + UInt64 elapsed_execute_ns{0}; }; using VerticalMergeRuntimeContextPtr = std::shared_ptr; @@ -321,6 +333,7 @@ private: global_ctx = static_pointer_cast(global); } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeVerticalStageTotalMilliseconds; } bool prepareVerticalMergeForAllColumns() const; bool executeVerticalMergeForAllColumns() const; @@ -361,6 +374,7 @@ private: MergeTasks::iterator projections_iterator; LoggerPtr log{getLogger("MergeTask::MergeProjectionsStage")}; + UInt64 elapsed_execute_ns{0}; }; using MergeProjectionsRuntimeContextPtr = std::shared_ptr; @@ -368,12 +382,15 @@ private: struct MergeProjectionsStage : public IStage { bool execute() override; + void 
setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) override { ctx = static_pointer_cast(local); global_ctx = static_pointer_cast(global); } - StageRuntimeContextPtr getContextForNextStage() override { return nullptr; } + + StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeProjectionStageTotalMilliseconds; } bool mergeMinMaxIndexAndPrepareProjections() const; bool executeProjections() const; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 52d12c9db7d..f4be7619fc8 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -164,7 +164,7 @@ void writeColumnSingleGranule( serialize_settings.position_independent_encoding = true; serialize_settings.low_cardinality_max_dictionary_size = 0; serialize_settings.use_compact_variant_discriminators_serialization = settings.use_compact_variant_discriminators_serialization; - serialize_settings.dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX; + serialize_settings.object_and_dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::PREFIX; serialization->serializeBinaryBulkStatePrefix(*column.column, serialize_settings, state); serialization->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, state); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 3fbabe1dd52..3edcce74b09 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -132,6 +132,10 @@ void MergeTreeDataPartWriterWide::addStreams( { assert(!substream_path.empty()); + /// Don't create streams for ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + auto full_stream_name = ISerialization::getFileNameForStream(name_and_type, substream_path); String stream_name; @@ -205,6 +209,10 @@ ISerialization::OutputStreamGetter MergeTreeDataPartWriterWide::createStreamGett { return [&, this] (const ISerialization::SubstreamPath & substream_path) -> WriteBuffer * { + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return nullptr; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(column, substream_path); @@ -367,6 +375,10 @@ StreamsWithMarks MergeTreeDataPartWriterWide::getCurrentMarksForColumn( min_compress_block_size = settings.min_compress_block_size; getSerialization(name_and_type.name)->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + /// Skip ephemeral subcolumns that don't store any real data. 
+ if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(name_and_type, substream_path); @@ -405,6 +417,10 @@ void MergeTreeDataPartWriterWide::writeSingleGranule( /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one. serialization->enumerateStreams([&] (const ISerialization::SubstreamPath & substream_path) { + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + bool is_offsets = !substream_path.empty() && substream_path.back().type == ISerialization::Substream::ArraySizes; auto stream_name = getStreamName(name_and_type, substream_path); @@ -656,7 +672,7 @@ void MergeTreeDataPartWriterWide::fillDataChecksums(MergeTreeDataPartChecksums & if (!serialization_states.empty()) { serialize_settings.getter = createStreamGetter(*it, written_offset_columns ? *written_offset_columns : offset_columns); - serialize_settings.dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX; + serialize_settings.object_and_dynamic_write_statistics = ISerialization::SerializeBinaryBulkSettings::ObjectAndDynamicStatisticsMode::SUFFIX; getSerialization(it->name)->serializeBinaryBulkStateSuffix(serialize_settings, serialization_states[it->name]); } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 59f3a299c99..58b23152016 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -369,7 +369,7 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( /// If sample and final are used together no need to calculate sampling expression twice. /// The first time it was calculated for final, because sample key is a part of the PK. /// So, assume that we already have calculated column. - ASTPtr sampling_key_ast = metadata_snapshot->getSamplingKeyAST(); + ASTPtr sampling_key_ast; if (final) { @@ -377,6 +377,12 @@ MergeTreeDataSelectSamplingData MergeTreeDataSelectExecutor::getSampling( /// We do spoil available_real_columns here, but it is not used later. available_real_columns.emplace_back(sampling_key.column_names[0], std::move(sampling_column_type)); } + else + { + sampling_key_ast = metadata_snapshot->getSamplingKeyAST()->clone(); + } + + chassert(sampling_key_ast != nullptr); if (has_lower_limit) { diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index 2a45ab1d927..2b924284857 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -103,8 +103,19 @@ size_t MergeTreeIndexGranularity::countMarksForRows(size_t from_mark, size_t num /// This is a heuristic to respect min_marks_to_read which is ignored by MergeTreeReadPool in case of remote disk. /// See comment in IMergeTreeSelectAlgorithm. 
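The hunk below replaces the unchecked `from_mark + 2 * min_marks_to_read` comparison with overflow-checked arithmetic. A simplified standalone version of the same clamping, using compiler builtins instead of ClickHouse's common:: helpers:

#include <cstddef>

/// Narrow the mark range only when 2 * min_marks_to_read and the subsequent
/// addition both fit into size_t; otherwise keep the original upper bound.
size_t clampRangeEnd(size_t from_mark, size_t to_mark, size_t min_marks_to_read)
{
    if (min_marks_to_read == 0)
        return to_mark;

    size_t doubled = 0;
    if (__builtin_mul_overflow(min_marks_to_read, static_cast<size_t>(2), &doubled))
        return to_mark;

    size_t candidate = 0;
    if (__builtin_add_overflow(from_mark, doubled, &candidate))
        return to_mark;

    return candidate < to_mark ? candidate : to_mark;
}
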
- if (min_marks_to_read && from_mark + 2 * min_marks_to_read <= to_mark) - to_mark = from_mark + min_marks_to_read; + if (min_marks_to_read) + { + // check overflow + size_t min_marks_to_read_2 = 0; + bool overflow = common::mulOverflow(min_marks_to_read, 2, min_marks_to_read_2); + + size_t to_mark_overwrite = 0; + if (!overflow) + overflow = common::addOverflow(from_mark, min_marks_to_read_2, to_mark_overwrite); + + if (!overflow && to_mark_overwrite < to_mark) + to_mark = to_mark_overwrite; + } return getRowsCountInRange(from_mark, std::max(1UL, to_mark)) - offset_in_rows; } diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 5b0793fa0c8..4c0da28c3c4 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -2,9 +2,6 @@ #if USE_USEARCH -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" - #include #include #include @@ -46,18 +43,19 @@ namespace { /// The only indexing method currently supported by USearch -std::set methods = {"hnsw"}; +const std::set methods = {"hnsw"}; /// Maps from user-facing name to internal name -std::unordered_map distanceFunctionToMetricKind = { +const std::unordered_map distanceFunctionToMetricKind = { {"L2Distance", unum::usearch::metric_kind_t::l2sq_k}, {"cosineDistance", unum::usearch::metric_kind_t::cos_k}}; /// Maps from user-facing name to internal name -std::unordered_map quantizationToScalarKind = { +const std::unordered_map quantizationToScalarKind = { {"f32", unum::usearch::scalar_kind_t::f32_k}, {"f16", unum::usearch::scalar_kind_t::f16_k}, {"i8", unum::usearch::scalar_kind_t::i8_k}}; +/// Usearch provides more quantizations but ^^ above ones seem the only ones comprehensively supported across all distance functions. template concept is_set = std::same_as>; @@ -95,9 +93,16 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( unum::usearch::metric_kind_t metric_kind, unum::usearch::scalar_kind_t scalar_kind, UsearchHnswParams usearch_hnsw_params) - : Base(Base::make(unum::usearch::metric_punned_t(dimensions, metric_kind, scalar_kind), - unum::usearch::index_dense_config_t(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search))) { + USearchIndex::metric_t metric(dimensions, metric_kind, scalar_kind); + + unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); + config.enable_key_lookups = false; /// we don't do row-to-vector lookups + + if (auto result = USearchIndex::make(metric, config); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. Error: {}", String(result.error.release())); + else + swap(result.index); } void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const @@ -108,9 +113,8 @@ void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const return true; }; - auto result = Base::save_to_stream(callback); - if (result.error) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index, error: " + String(result.error.release())); + if (auto result = Base::save_to_stream(callback); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index. 
Error: {}", String(result.error.release())); } void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) @@ -121,26 +125,43 @@ void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) return true; }; - auto result = Base::load_from_stream(callback); - if (result.error) + if (auto result = Base::load_from_stream(callback); !result) /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index, error: " + String(result.error.release()) + " Please drop the index and create it again."); + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index. Please drop the index and create it again. Error: {}", String(result.error.release())); + + if (!try_reserve(limits())) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); } USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const { + USearchIndex::stats_t global_stats = Base::stats(); + Statistics statistics = { .max_level = max_level(), .connectivity = connectivity(), - .size = size(), /// number of vectors - .capacity = capacity(), /// number of vectors reserved - .memory_usage = memory_usage(), /// in bytes, the value is not exact + .size = size(), + .capacity = capacity(), + .memory_usage = memory_usage(), .bytes_per_vector = bytes_per_vector(), .scalar_words = scalar_words(), - .statistics = stats()}; + .nodes = global_stats.nodes, + .edges = global_stats.edges, + .max_edges = global_stats.max_edges, + .level_stats = {}}; + + for (size_t i = 0; i < statistics.max_level; ++i) + statistics.level_stats.push_back(Base::stats(i)); + return statistics; } +String USearchIndexWithSerialization::Statistics::toString() const +{ + return fmt::format("max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}, bytes_per_vector = {}, scalar_words = {}, nodes = {}, edges = {}, max_edges = {}", + max_level, connectivity, size, capacity, ReadableSize(memory_usage), bytes_per_vector, scalar_words, nodes, edges, max_edges); + +} MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, const Block & index_sample_block_, @@ -169,6 +190,8 @@ MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) const { + LOG_TRACE(logger, "Start writing vector similarity index"); + if (empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty minmax index {}", backQuote(index_name)); @@ -181,12 +204,13 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) index->serialize(ostr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Wrote vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", - statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); + LOG_TRACE(logger, "Wrote vector similarity index: {}", statistics.toString()); } void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) { + LOG_TRACE(logger, "Start loading vector similarity index"); + UInt64 file_version; readIntBinary(file_version, istr); if (file_version != FILE_FORMAT_VERSION) @@ -204,8 +228,7 @@ void 
MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, index->deserialize(istr); auto statistics = index->getStatistics(); - LOG_TRACE(logger, "Loaded vector similarity index: max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}", - statistics.max_level, statistics.connectivity, statistics.size, statistics.capacity, ReadableSize(statistics.memory_usage)); + LOG_TRACE(logger, "Loaded vector similarity index: {}", statistics.toString()); } MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilarity( @@ -229,14 +252,47 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAnd return granule; } +namespace +{ + +template +void updateImpl(const ColumnArray * column_array, const ColumnArray::Offsets & column_array_offsets, USearchIndexWithSerializationPtr & index, size_t dimensions, size_t rows) +{ + const auto & column_array_data = column_array->getData(); + const auto & column_array_data_float = typeid_cast(column_array_data); + const auto & column_array_data_float_data = column_array_data_float.getData(); + + /// Check all sizes are the same + for (size_t row = 0; row < rows - 1; ++row) + if (column_array_offsets[row + 1] - column_array_offsets[row] != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + + /// Reserving space is mandatory + if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + rows))) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); + + for (size_t row = 0; row < rows; ++row) + { + if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); + else + { + ProfileEvents::increment(ProfileEvents::USearchAddCount); + ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); + } + } +} + +} + void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_t * pos, size_t limit) { if (*pos >= block.rows()) throw Exception( ErrorCodes::LOGICAL_ERROR, "The provided position is not less than the number of block rows. 
Position: {}, Block rows: {}.", - *pos, - block.rows()); + *pos, block.rows()); size_t rows_read = std::min(limit, block.rows() - *pos); @@ -250,58 +306,53 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); const String & index_column_name = index_sample_block.getByPosition(0).name; - ColumnPtr column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); + const ColumnPtr & index_column = block.getByName(index_column_name).column; + ColumnPtr column_cut = index_column->cut(*pos, rows_read); - if (const auto & column_array = typeid_cast(column_cut.get())) - { - const auto & column_array_data = column_array->getData(); - const auto & column_array_data_float = typeid_cast(column_array_data); - const auto & column_array_data_float_data = column_array_data_float.getData(); + const auto * column_array = typeid_cast(column_cut.get()); + if (!column_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); - const auto & column_array_offsets = column_array->getOffsets(); - const size_t num_rows = column_array_offsets.size(); + if (column_array->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default + /// values which is also empty. + if (column_array->isDefaultAt(0)) + throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); - /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays - /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default - /// values which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); + const size_t rows = column_array->size(); - /// Check all sizes are the same - const size_t dimensions = column_array_offsets[0]; - for (size_t i = 0; i < num_rows - 1; ++i) - if (column_array_offsets[i + 1] - column_array_offsets[i] != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); + const auto & column_array_offsets = column_array->getOffsets(); + const size_t dimensions = column_array_offsets[0]; - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. 
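The dimension check above relies on ColumnArray offsets being cumulative element counts. A minimal sketch of that invariant over plain std::vector offsets (hypothetical helper, not the actual ClickHouse column types):

#include <cstddef>
#include <vector>

/// offsets[i] is the cumulative element count up to and including row i,
/// so row i holds offsets[i] - offsets[i - 1] elements (offsets[-1] == 0).
bool allRowsHaveDimension(const std::vector<size_t> & offsets, size_t dimensions)
{
    size_t prev = 0;
    for (size_t offset : offsets)
    {
        if (offset - prev != dimensions)
            return false;
        prev = offset;
    }
    return true;
}
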
- if (index && index->dimensions() != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); + if (!index) + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); - if (!index) - index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); + /// Also check that previously inserted blocks have the same size as this block. + /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across + /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. + if (index->dimensions() != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); - /// Reserving space is mandatory - if (!index->reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); + /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp + if (index->size() + rows > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - for (size_t row = 0; row < num_rows; ++row) - { - auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); - if (!rc) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index, error: " + String(rc.error.release())); + DataTypePtr data_type = block.getDataTypes()[0]; + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - ProfileEvents::increment(ProfileEvents::USearchAddCount); - ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, rc.visited_members); - ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, rc.computed_distances); - } - } + if (WhichDataType(nested_type_index).isFloat32()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else if (WhichDataType(nested_type_index).isFloat64()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float32) column"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + *pos += rows_read; } @@ -349,19 +400,18 @@ std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(Mer "does not match the dimension in the index ({})", vector_similarity_condition.getDimensions(), index->dimensions()); - const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); + const std::vector reference_vector = vector_similarity_condition.getReferenceVector(); - auto result = index->search(reference_vector.data(), limit); - if (result.error) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index, error: " + String(result.error.release())); + auto search_result = index->search(reference_vector.data(), limit); + if (!search_result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index. 
Error: {}", String(search_result.error.release())); ProfileEvents::increment(ProfileEvents::USearchSearchCount); - ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, result.visited_members); - ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, result.computed_distances); + ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, search_result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, search_result.computed_distances); - std::vector neighbors(result.size()); /// indexes of dots which were closest to the reference vector - std::vector distances(result.size()); - result.dump_to(neighbors.data(), distances.data()); + std::vector neighbors(search_result.size()); /// indexes of vectors which were closest to the reference vector + search_result.dump_to(neighbors.data()); std::vector granules; granules.reserve(neighbors.size()); @@ -409,14 +459,13 @@ MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition( MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) { - const bool has_six_args = (index.arguments.size() == 6); - + /// Default parameters: unum::usearch::metric_kind_t metric_kind = distanceFunctionToMetricKind.at(index.arguments[1].safeGet()); - - /// use defaults for the other parameters unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::f32_k; UsearchHnswParams usearch_hnsw_params; + /// Optional parameters: + const bool has_six_args = (index.arguments.size() == 6); if (has_six_args) { scalar_kind = quantizationToScalarKind.at(index.arguments[2].safeGet()); @@ -461,30 +510,30 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta { if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); - if (index.arguments[3].safeGet() < 2) - throw Exception(ErrorCodes::INCORRECT_DATA, "Fourth argument (M) of vector similarity index must be > 1"); - if (index.arguments[4].safeGet() < 1) - throw Exception(ErrorCodes::INCORRECT_DATA, "Fifth argument (ef_construction) of vector similarity index must be > 0"); - if (index.arguments[5].safeGet() < 1) - throw Exception(ErrorCodes::INCORRECT_DATA, "Sixth argument (ef_search) of vector similarity index must be > 0"); + + /// Call Usearch's own parameter validation method for HNSW-specific parameters + UInt64 m = index.arguments[3].safeGet(); + UInt64 ef_construction = index.arguments[4].safeGet(); + UInt64 ef_search = index.arguments[5].safeGet(); + + unum::usearch::index_dense_config_t config(m, ef_construction, ef_search); + + if (auto error = config.validate(); error) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. 
Error: {}", String(error.release())); } /// Check that the index is created on a single column if (index.column_names.size() != 1 || index.data_types.size() != 1) throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); - /// Check data type of the indexed column: + /// Check that the data type is Array(Float*) DataTypePtr data_type = index.sample_block.getDataTypes()[0]; - if (const auto * data_type_array = typeid_cast(data_type.get())) - { - TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); - } - else - { - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); - } + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float*)"); + TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + if (!WhichDataType(nested_type_index).isFloat()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float*)"); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index f7098c1626c..c4c03254d2d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -4,12 +4,9 @@ #if USE_USEARCH -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" -# include -# include -# include -#pragma clang diagnostic pop +#include +#include +#include namespace DB { @@ -21,7 +18,7 @@ struct UsearchHnswParams size_t ef_search = unum::usearch::default_expansion_search(); }; -using USearchIndex = unum::usearch::index_dense_gt; +using USearchIndex = unum::usearch::index_dense_t; class USearchIndexWithSerialization : public USearchIndex { @@ -41,13 +38,18 @@ public: { size_t max_level; size_t connectivity; - size_t size; - size_t capacity; - size_t memory_usage; - /// advanced stats: + size_t size; /// number of indexed vectors + size_t capacity; /// reserved number of indexed vectors + size_t memory_usage; /// byte size (not exact) size_t bytes_per_vector; size_t scalar_words; - Base::stats_t statistics; + size_t nodes; + size_t edges; + size_t max_edges; + + std::vector level_stats; /// for debugging, excluded from getStatistics() + + String toString() const; }; Statistics getStatistics() const; diff --git a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp index a9b77fb6c03..7081eb716f5 100644 --- a/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreePrefetchedReadPool.cpp @@ -395,6 +395,10 @@ void MergeTreePrefetchedReadPool::fillPerThreadTasks(size_t threads, size_t sum_ part_stat.prefetch_step_marks = std::max(part_stat.prefetch_step_marks, per_part_infos[i]->min_marks_per_task); + if (part_stat.prefetch_step_marks == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + LOG_DEBUG( log, "Part: {}, sum_marks: {}, approx mark size: {}, prefetch_step_bytes: {}, prefetch_step_marks: {}, 
(ranges: {})", diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index cc321cd5a4d..23c314e48f5 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -24,6 +24,7 @@ namespace ErrorCodes { extern const int CANNOT_SCHEDULE_TASK; extern const int LOGICAL_ERROR; +extern const int BAD_ARGUMENTS; } MergeTreeReadPool::MergeTreeReadPool( @@ -235,6 +236,10 @@ void MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) const auto part_idx = current_parts.back().part_idx; const auto min_marks_per_task = per_part_infos[part_idx]->min_marks_per_task; + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + /// Do not get too few rows from part. if (marks_in_part >= min_marks_per_task && need_marks < min_marks_per_task) need_marks = min_marks_per_task; diff --git a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp index 6d2560bc9c7..95a10454f9e 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolBase.cpp @@ -85,6 +85,7 @@ static size_t calculateMinMarksPerTask( min_marks_per_task = heuristic_min_marks; } } + LOG_TEST(&Poco::Logger::get("MergeTreeReadPoolBase"), "Will use min_marks_per_task={}", min_marks_per_task); return min_marks_per_task; } diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp index 33eaf5a49bd..d23072771f2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicas.cpp @@ -8,6 +8,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( @@ -38,6 +39,10 @@ MergeTreeReadPoolParallelReplicas::MergeTreeReadPoolParallelReplicas( for (const auto & info : per_part_infos) min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + extension.all_callback( InitialAllRangesAnnouncement(coordination_mode, parts_ranges.getDescriptions(), extension.number_of_current_replica)); } diff --git a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp index 6b5cf978423..42ffc4304b2 100644 --- a/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPoolParallelReplicasInOrder.cpp @@ -6,6 +6,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int BAD_ARGUMENTS; } MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrder( @@ -37,6 +38,10 @@ MergeTreeReadPoolParallelReplicasInOrder::MergeTreeReadPoolParallelReplicasInOrd for (const auto & info : per_part_infos) min_marks_per_task = std::max(min_marks_per_task, info->min_marks_per_task); + if (min_marks_per_task == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + for (const auto & part : parts_ranges) request.push_back({part.data_part->info, 
MarkRanges{}}); diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index 69dc2e4b2bb..7451374070c 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -234,7 +234,7 @@ void MergeTreeReaderCompact::readPrefix( serialization = getSerializationInPart(name_and_type); deserialize_settings.getter = buffer_getter; - deserialize_settings.dynamic_read_statistics = true; + deserialize_settings.object_and_dynamic_read_statistics = true; serialization->deserializeBinaryBulkStatePrefix(deserialize_settings, deserialize_binary_bulk_state_map[name_and_type.name], nullptr); } catch (Exception & e) diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp index b6882fdced9..898bf5a2933 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -213,6 +213,10 @@ void MergeTreeReaderWide::addStreams( ISerialization::StreamCallback callback = [&] (const ISerialization::SubstreamPath & substream_path) { + /// Don't create streams for ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); /** If data file is missing then we will not try to open it. @@ -326,7 +330,7 @@ void MergeTreeReaderWide::deserializePrefix( if (!deserialize_binary_bulk_state_map.contains(name)) { ISerialization::DeserializeBinaryBulkSettings deserialize_settings; - deserialize_settings.dynamic_read_statistics = true; + deserialize_settings.object_and_dynamic_read_statistics = true; deserialize_settings.getter = [&](const ISerialization::SubstreamPath & substream_path) { return getStream(/* seek_to_start = */true, substream_path, data_part_info_for_read->getChecksums(), name_and_type, 0, /* seek_to_mark = */false, current_task_last_mark, cache); @@ -348,6 +352,10 @@ void MergeTreeReaderWide::prefetchForColumn( deserializePrefix(serialization, name_and_type, current_task_last_mark, cache, deserialize_states_cache); auto callback = [&](const ISerialization::SubstreamPath & substream_path) { + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(name_and_type, substream_path, data_part_info_for_read->getChecksums()); if (stream_name && !prefetched_streams.contains(*stream_name)) diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 5ba1988cc5d..de1f0f60cfc 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -84,6 +84,7 @@ struct Settings; M(Bool, exclude_deleted_rows_for_part_size_in_merge, false, "Use an estimated source part size (excluding lightweight deleted rows) when selecting parts to merge", 0) \ M(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \ M(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \ + M(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ \ /** Inserts settings. 
*/ \ M(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. Disabled if set to 0", 0) \ @@ -215,6 +216,7 @@ struct Settings; M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. */ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree, if allowed, what is the action when merge, drop or rebuild.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 73084f487b9..56f68fd265a 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -254,6 +254,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on another replicas. " "We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); if (storage.getSettings()->detach_not_byte_identical_parts) @@ -281,6 +282,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit */ finish_callback = [storage_ptr = &storage]() { storage_ptr->merge_selecting_task->schedule(); }; ProfileEvents::increment(ProfileEvents::ReplicatedPartMutations); + mutate_task->updateProfileEvents(); write_part_log({}); return true; diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 9aec074deae..10461eb5942 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -102,6 +102,7 @@ bool MutatePlainMergeTreeTask::executeStep() transaction.commit(); storage.updateMutationEntriesErrors(future_part, true, ""); + mutate_task->updateProfileEvents(); write_part_log({}); state = State::NEED_FINISH; @@ -114,6 +115,7 @@ bool MutatePlainMergeTreeTask::executeStep() PreformattedMessage exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false); LOG_ERROR(getLogger("MutatePlainMergeTreeTask"), exception_message); storage.updateMutationEntriesErrors(future_part, false, exception_message.text); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); tryLogCurrentException(__PRETTY_FUNCTION__); return false; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 5fcf699de59..40648439887 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ 
b/src/Storages/MergeTree/MutateTask.cpp @@ -38,7 +38,13 @@ namespace ProfileEvents { -extern const Event MutateTaskProjectionsCalculationMicroseconds; + extern const Event MutationTotalParts; + extern const Event MutationUntouchedParts; + extern const Event MutationTotalMilliseconds; + extern const Event MutationExecuteMilliseconds; + extern const Event MutationAllPartColumns; + extern const Event MutationSomePartColumns; + extern const Event MutateTaskProjectionsCalculationMicroseconds; } namespace CurrentMetrics @@ -659,7 +665,7 @@ static NameSet collectFilesToSkip( const Block & updated_header, const std::set & indices_to_recalc, const String & mrk_extension, - const std::set & projections_to_recalc, + const std::set & projections_to_skip, const std::set & stats_to_recalc) { NameSet files_to_skip = source_part->getFileNamesWithoutChecksums(); @@ -684,7 +690,7 @@ static NameSet collectFilesToSkip( } } - for (const auto & projection : projections_to_recalc) + for (const auto & projection : projections_to_skip) files_to_skip.insert(projection->getDirectoryName()); for (const auto & stat : stats_to_recalc) @@ -1046,6 +1052,7 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; + UInt64 execute_elapsed_ns = 0; }; using MutationContextPtr = std::shared_ptr; @@ -1250,6 +1257,8 @@ public: private: void prepare(); bool mutateOriginalPartAndPrepareProjections(); + void writeTempProjectionPart(size_t projection_idx, Chunk chunk); + void finalizeTempProjections(); bool iterateThroughAllProjections(); void constructTaskForProjectionPartsMerge(); void finalize(); @@ -1300,10 +1309,22 @@ void PartMergerWriter::prepare() bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() { - Block cur_block; - Block projection_header; - if (MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry) && ctx->mutating_executor->pull(cur_block)) + Stopwatch watch(CLOCK_MONOTONIC_COARSE); + UInt64 step_time_ms = ctx->data->getSettings()->background_task_preferred_step_execution_time_ms.totalMilliseconds(); + + do { + Block cur_block; + Block projection_header; + + MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry); + + if (!ctx->mutating_executor->pull(cur_block)) + { + finalizeTempProjections(); + return false; + } + if (ctx->minmax_idx) ctx->minmax_idx->update(cur_block, MergeTreeData::getMinMaxColumnsNames(ctx->metadata_snapshot->getPartitionKey())); @@ -1315,46 +1336,56 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) { - const auto & projection = *ctx->projections_to_build[i]; + Chunk squashed_chunk; - ProfileEventTimeIncrement watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds); - Block block_to_squash = projection.calculate(cur_block, ctx->context); - projection_squashes[i].setHeader(block_to_squash.cloneEmpty()); - - Chunk squashed_chunk = Squashing::squash(projection_squashes[i].add({block_to_squash.getColumns(), block_to_squash.rows()})); - if (squashed_chunk) { - auto result = projection_squashes[i].getHeader().cloneWithColumns(squashed_chunk.detachColumns()); - auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, result, projection, ctx->new_data_part.get(), ++block_num); - tmp_part.finalize(); - tmp_part.part->getDataPartStorage().commitTransaction(); - 
projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); + ProfileEventTimeIncrement projection_watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds); + Block block_to_squash = ctx->projections_to_build[i]->calculate(cur_block, ctx->context); + + projection_squashes[i].setHeader(block_to_squash.cloneEmpty()); + squashed_chunk = Squashing::squash(projection_squashes[i].add({block_to_squash.getColumns(), block_to_squash.rows()})); } + + if (squashed_chunk) + writeTempProjectionPart(i, std::move(squashed_chunk)); } (*ctx->mutate_entry)->rows_written += cur_block.rows(); (*ctx->mutate_entry)->bytes_written_uncompressed += cur_block.bytes(); + } while (watch.elapsedMilliseconds() < step_time_ms); - /// Need execute again - return true; - } + /// Need execute again + return true; +} +void PartMergerWriter::writeTempProjectionPart(size_t projection_idx, Chunk chunk) +{ + const auto & projection = *ctx->projections_to_build[projection_idx]; + const auto & projection_plan = projection_squashes[projection_idx]; + + auto result = projection_plan.getHeader().cloneWithColumns(chunk.detachColumns()); + + auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart( + *ctx->data, + ctx->log, + result, + projection, + ctx->new_data_part.get(), + ++block_num); + + tmp_part.finalize(); + tmp_part.part->getDataPartStorage().commitTransaction(); + projection_parts[projection.name].emplace_back(std::move(tmp_part.part)); +} + +void PartMergerWriter::finalizeTempProjections() +{ // Write the last block for (size_t i = 0, size = ctx->projections_to_build.size(); i < size; ++i) { - const auto & projection = *ctx->projections_to_build[i]; - auto & projection_squash_plan = projection_squashes[i]; - auto squashed_chunk = Squashing::squash(projection_squash_plan.flush()); + auto squashed_chunk = Squashing::squash(projection_squashes[i].flush()); if (squashed_chunk) - { - auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns()); - auto temp_part = MergeTreeDataWriter::writeTempProjectionPart( - *ctx->data, ctx->log, result, projection, ctx->new_data_part.get(), ++block_num); - temp_part.finalize(); - temp_part.part->getDataPartStorage().commitTransaction(); - projection_parts[projection.name].emplace_back(std::move(temp_part.part)); - } + writeTempProjectionPart(i, std::move(squashed_chunk)); } projection_parts_iterator = std::make_move_iterator(projection_parts.begin()); @@ -1362,12 +1393,8 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections() /// Maybe there are no projections ? 
if (projection_parts_iterator != std::make_move_iterator(projection_parts.end())) constructTaskForProjectionPartsMerge(); - - /// Let's move on to the next stage - return false; } - void PartMergerWriter::constructTaskForProjectionPartsMerge() { auto && [name, parts] = *projection_parts_iterator; @@ -1554,6 +1581,10 @@ private: removed_projections.insert(command.column_name); } + bool lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + bool lightweight_delete_drop = lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP; + const auto & projections = ctx->metadata_snapshot->getProjections(); for (const auto & projection : projections) { @@ -1561,10 +1592,11 @@ private: continue; bool need_recalculate = - ctx->materialized_projections.contains(projection.name) + (ctx->materialized_projections.contains(projection.name) || (!is_full_part_storage && ctx->source_part->hasProjection(projection.name) - && !ctx->source_part->hasBrokenProjection(projection.name)); + && !ctx->source_part->hasBrokenProjection(projection.name))) + && !lightweight_delete_drop; if (need_recalculate) { @@ -1572,7 +1604,7 @@ private: } else { - if (ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!lightweight_delete_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } @@ -2017,6 +2049,9 @@ MutateTask::MutateTask( bool MutateTask::execute() { + Stopwatch watch; + SCOPE_EXIT({ ctx->execute_elapsed_ns += watch.elapsedNanoseconds(); }); + switch (state) { case State::NEED_PREPARE: @@ -2050,6 +2085,15 @@ bool MutateTask::execute() return false; } +void MutateTask::updateProfileEvents() const +{ + UInt64 total_elapsed_ms = (*ctx->mutate_entry)->watch.elapsedMilliseconds(); + UInt64 execute_elapsed_ms = ctx->execute_elapsed_ns / 1000000UL; + + ProfileEvents::increment(ProfileEvents::MutationTotalMilliseconds, total_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MutationExecuteMilliseconds, execute_elapsed_ms); +} + static bool canSkipConversionToNullable(const MergeTreeDataPartPtr & part, const MutationCommand & command) { if (command.type != MutationCommand::READ_COLUMN) @@ -2112,6 +2156,7 @@ static bool canSkipMutationCommandForPart(const MergeTreeDataPartPtr & part, con bool MutateTask::prepare() { + ProfileEvents::increment(ProfileEvents::MutationTotalParts); MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry); if (ctx->future_part->parts.size() != 1) @@ -2174,6 +2219,7 @@ bool MutateTask::prepare() ctx->temporary_directory_lock = std::move(lock); } + ProfileEvents::increment(ProfileEvents::MutationUntouchedParts); promise.set_value(std::move(part)); return false; } @@ -2198,6 +2244,8 @@ bool MutateTask::prepare() ctx->stage_progress = std::make_unique(1.0); + bool lightweight_delete_mode = false; + if (!ctx->for_interpreter.empty()) { /// Always disable filtering in mutations: we want to read and write all rows because for updates we rewrite only some of the @@ -2215,6 +2263,21 @@ bool MutateTask::prepare() ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); + + lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + /// If under the condition of lightweight delete mode 
with rebuild option, add projections again here as we can only know + /// the condition as early as from here. + if (lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::REBUILD) + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + { + if (!ctx->source_part->hasProjection(projection.name)) + continue; + + ctx->materialized_projections.insert(projection.name); + } + } } auto single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); @@ -2256,7 +2319,7 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->updated_header.has(RowExistsColumn::name)) + if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && lightweight_delete_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column @@ -2283,6 +2346,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::REMOVE_BLOBS; task = std::make_unique(ctx); + ProfileEvents::increment(ProfileEvents::MutationAllPartColumns); } else /// TODO: check that we modify only non-key columns in this case. { @@ -2293,10 +2357,30 @@ bool MutateTask::prepare() ctx->context, ctx->materialized_indices); - ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( - ctx->source_part, - ctx->metadata_snapshot, - ctx->materialized_projections); + auto lightweight_mutation_projection_mode = ctx->data->getSettings()->lightweight_mutation_projection_mode; + bool lightweight_delete_drops_projections = + lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP + || lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW; + + std::set projections_to_skip_container; + auto * projections_to_skip = &projections_to_skip_container; + + bool should_create_projections = !(lightweight_delete_mode && lightweight_delete_drops_projections); + /// Under lightweight delete mode, if option is drop, projections_to_recalc should be empty. 
+ if (should_create_projections) + { + ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( + ctx->source_part, + ctx->metadata_snapshot, + ctx->materialized_projections); + + projections_to_skip = &ctx->projections_to_recalc; + } + else + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + projections_to_skip->insert(&projection); + } ctx->stats_to_recalc = MutationHelpers::getStatisticsToRecalculate(ctx->metadata_snapshot, ctx->materialized_statistics); @@ -2306,7 +2390,7 @@ bool MutateTask::prepare() ctx->updated_header, ctx->indices_to_recalc, ctx->mrk_extension, - ctx->projections_to_recalc, + *projections_to_skip, ctx->stats_to_recalc); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( @@ -2322,6 +2406,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::ASK_KEEPER; task = std::make_unique<MutateSomePartColumnsTask>(ctx); + ProfileEvents::increment(ProfileEvents::MutationSomePartColumns); } return true; diff --git a/src/Storages/MergeTree/MutateTask.h b/src/Storages/MergeTree/MutateTask.h index dc22b90f0e9..08427bff6d8 100644 --- a/src/Storages/MergeTree/MutateTask.h +++ b/src/Storages/MergeTree/MutateTask.h @@ -39,6 +39,7 @@ public: bool need_prefix_); bool execute(); + void updateProfileEvents() const; std::future getFuture() { diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index f46b4de10b7..ee47fe3549a 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -1004,6 +1004,10 @@ void ParallelReplicasReadingCoordinator::handleInitialAllRangesAnnouncement(Init ParallelReadResponse ParallelReplicasReadingCoordinator::handleRequest(ParallelReadRequest request) { + if (request.min_number_of_marks == 0) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Chosen number of marks to read is zero (likely because of weird interference of settings)"); + ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::ParallelReplicasHandleRequestMicroseconds); std::lock_guard lock(mutex); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 05fd6f6915b..d3ccda904b6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -49,6 +48,20 @@ ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(Storage task = storage.getContext()->getSchedulePool().createTask(log_name, [this]{ run(); }); } +void ReplicatedMergeTreeRestartingThread::start(bool schedule) +{ + LOG_TRACE(log, "Starting the restarting thread, schedule: {}", schedule); + if (schedule) + task->activateAndSchedule(); + else + task->activate(); +} + +void ReplicatedMergeTreeRestartingThread::wakeup() +{ + task->schedule(); +} + void ReplicatedMergeTreeRestartingThread::run() { if (need_stop) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 01071d80e8b..d719505ae5e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -24,16 +24,9 @@ class ReplicatedMergeTreeRestartingThread public: explicit
ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_); - void start(bool schedule = true) - { - LOG_TRACE(log, "Starting restating thread, schedule: {}", schedule); - if (schedule) - task->activateAndSchedule(); - else - task->activate(); - } + void start(bool schedule); - void wakeup() { task->schedule(); } + void wakeup(); void shutdown(bool part_of_full_shutdown); diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.cpp b/src/Storages/MergeTree/VectorSimilarityCondition.cpp index 2e53b4ecb3a..c8f33857640 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.cpp +++ b/src/Storages/MergeTree/VectorSimilarityCondition.cpp @@ -24,7 +24,7 @@ namespace { template -void extractReferenceVectorFromLiteral(std::vector & reference_vector, Literal literal) +void extractReferenceVectorFromLiteral(std::vector & reference_vector, Literal literal) { Float64 float_element_of_reference_vector; Int64 int_element_of_reference_vector; @@ -72,7 +72,7 @@ UInt64 VectorSimilarityCondition::getLimit() const throw Exception(ErrorCodes::LOGICAL_ERROR, "No LIMIT section in query, not supported"); } -std::vector VectorSimilarityCondition::getReferenceVector() const +std::vector VectorSimilarityCondition::getReferenceVector() const { if (index_is_useful && query_information.has_value()) return query_information->reference_vector; diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.h b/src/Storages/MergeTree/VectorSimilarityCondition.h index fd339ed715d..2380f8f46b0 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.h +++ b/src/Storages/MergeTree/VectorSimilarityCondition.h @@ -60,7 +60,7 @@ public: L2 }; - std::vector reference_vector; + std::vector reference_vector; DistanceFunction distance_function; String column_name; UInt64 limit; @@ -70,7 +70,7 @@ public: /// Returns false if query can be speeded up by an ANN index, true otherwise. bool alwaysUnknownOrTrue(String distance_function) const; - std::vector getReferenceVector() const; + std::vector getReferenceVector() const; size_t getDimensions() const; String getColumnName() const; Info::DistanceFunction getDistanceFunction() const; diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index fb86d9e7603..3a22daa0011 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -215,6 +215,10 @@ static IMergeTreeDataPart::Checksums checkDataPart( { get_serialization(column)->enumerateStreams([&](const ISerialization::SubstreamPath & substream_path) { + /// Skip ephemeral subcolumns that don't store any real data. + if (ISerialization::isEphemeralSubcolumn(substream_path, substream_path.size())) + return; + auto stream_name = IMergeTreeDataPart::getStreamNameForColumn(column, substream_path, ".bin", data_part_storage); if (!stream_name) diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h index f444a581eb6..bf2da7235a2 100644 --- a/src/Storages/NamedCollectionsHelpers.h +++ b/src/Storages/NamedCollectionsHelpers.h @@ -133,7 +133,7 @@ void validateNamedCollection( { throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Unexpected key {} in named collection. Required keys: {}, optional keys: {}", + "Unexpected key `{}` in named collection. 
Required keys: {}, optional keys: {}", backQuoteIfNeed(key), fmt::join(required_keys, ", "), fmt::join(optional_keys, ", ")); } } diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index c896a760597..7aadba18817 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -262,10 +262,11 @@ struct DeltaLakeMetadataImpl partition_name, file_schema.toNamesAndTypesDescription()); } + LOG_TEST(log, "Partition {} value is {} (data type: {}, file: {})", + partition_name, value, name_and_type->type->getName(), filename); + auto field = getFieldValue(value, name_and_type->type); current_partition_columns.emplace_back(*name_and_type, field); - - LOG_TEST(log, "Partition {} value is {} (for {})", partition_name, value, filename); } } } @@ -332,6 +333,8 @@ struct DeltaLakeMetadataImpl WhichDataType which(check_type->getTypeId()); if (which.isStringOrFixedString()) return value; + else if (isBool(check_type)) + return parse(value); else if (which.isInt8()) return parse(value); else if (which.isUInt8()) diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index f632e553a0d..01f78673ed8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -659,7 +659,7 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) { - std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); + std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", doubleQuoteString(publication_name)); tx.exec(query_str); LOG_DEBUG(log, "Dropped publication: {}", publication_name); } @@ -667,7 +667,7 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::addTableToPublication(pqxx::nontransaction & ntx, const String & table_name) { - std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", publication_name, doubleQuoteWithSchema(table_name)); + std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", doubleQuoteString(publication_name), doubleQuoteWithSchema(table_name)); ntx.exec(query_str); LOG_TRACE(log, "Added table {} to publication `{}`", doubleQuoteWithSchema(table_name), publication_name); } diff --git a/src/Storages/StorageExternalDistributed.cpp b/src/Storages/StorageExternalDistributed.cpp index 951c87807bb..9fc8b588c89 100644 --- a/src/Storages/StorageExternalDistributed.cpp +++ b/src/Storages/StorageExternalDistributed.cpp @@ -1,4 +1,4 @@ -#include "StorageExternalDistributed.h" +#include #include #include @@ -6,6 +6,8 @@ #include #include #include +#include +#include #include #include #include @@ -112,14 +114,39 @@ void registerStorageExternalDistributed(StorageFactory & factory) std::unordered_set shards; ASTs inner_engine_args(engine_args.begin() + 1, engine_args.end()); + ASTPtr * address_arg = nullptr; + + /// If there is a named collection argument, named `addresses_expr` + for (auto & node : inner_engine_args) + { + if (ASTFunction * func = node->as(); func && func->name == "equals" && func->arguments) + { + if (ASTExpressionList * func_args = func->arguments->as(); func_args && func_args->children.size() == 2) + { + if (ASTIdentifier * arg_name = 
func_args->children[0]->as(); arg_name && arg_name->name() == "addresses_expr") + { + address_arg = &func_args->children[1]; + break; + } + } + } + } + + /// Otherwise it is the first argument. + if (!address_arg) + address_arg = &inner_engine_args.at(0); + + String addresses_expr = checkAndGetLiteralArgument(*address_arg, "addresses"); + Strings shards_addresses = get_addresses(addresses_expr); + auto engine_name = checkAndGetLiteralArgument(engine_args[0], "engine_name"); if (engine_name == "URL") { - auto configuration = StorageURL::getConfiguration(inner_engine_args, context); - auto shards_addresses = get_addresses(configuration.addresses_expr); auto format_settings = StorageURL::getFormatSettingsFromArgs(args); for (const auto & shard_address : shards_addresses) { + *address_arg = std::make_shared(shard_address); + auto configuration = StorageURL::getConfiguration(inner_engine_args, context); auto uri_options = parseRemoteDescription(shard_address, 0, shard_address.size(), '|', max_addresses); if (uri_options.size() > 1) { @@ -140,13 +167,12 @@ void registerStorageExternalDistributed(StorageFactory & factory) else if (engine_name == "MySQL") { MySQLSettings mysql_settings; - auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); - auto shards_addresses = get_addresses(configuration.addresses_expr); for (const auto & shard_address : shards_addresses) { - auto current_configuration{configuration}; - current_configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); - auto pool = createMySQLPoolWithFailover(current_configuration, mysql_settings); + *address_arg = std::make_shared(shard_address); + auto configuration = StorageMySQL::getConfiguration(inner_engine_args, context, mysql_settings); + configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 3306); + auto pool = createMySQLPoolWithFailover(configuration, mysql_settings); shards.insert(std::make_shared( args.table_id, std::move(pool), configuration.database, configuration.table, /* replace_query = */ false, /* on_duplicate_clause = */ "", @@ -157,14 +183,13 @@ void registerStorageExternalDistributed(StorageFactory & factory) #if USE_LIBPQXX else if (engine_name == "PostgreSQL") { - auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); - auto shards_addresses = get_addresses(configuration.addresses_expr); for (const auto & shard_address : shards_addresses) { - auto current_configuration{configuration}; - current_configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); + *address_arg = std::make_shared(shard_address); + auto configuration = StoragePostgreSQL::getConfiguration(inner_engine_args, context); + configuration.addresses = parseRemoteDescriptionForExternalDatabase(shard_address, max_addresses, 5432); auto pool = std::make_shared( - current_configuration, + configuration, settings.postgresql_connection_pool_size, settings.postgresql_connection_pool_wait_timeout, settings.postgresql_connection_pool_retries, diff --git a/src/Storages/StorageExternalDistributed.h b/src/Storages/StorageExternalDistributed.h index c4d37c3e5cc..56c7fe86f34 100644 --- a/src/Storages/StorageExternalDistributed.h +++ b/src/Storages/StorageExternalDistributed.h @@ -8,8 +8,6 @@ namespace DB { -struct ExternalDataSourceConfiguration; - /// Storages MySQL and PostgreSQL use ConnectionPoolWithFailover and support multiple replicas. 
/// This class unites multiple storages with replicas into multiple shards with replicas. /// A query to external database is passed to one replica on each shard, the result is united. diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 2226de3e64f..4a655cac566 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace DB @@ -334,10 +335,17 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( NameSet required_ttl_columns; NameSet updated_ttl_columns; - auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set) + auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set, bool is_projection = false) { for (const auto & dependency : required_columns) { + /// useful in the case of lightweight delete with wide part and option of rebuild projection + if (is_projection && updated_columns.contains(RowExistsColumn::name)) + { + to_set.insert(required_columns.begin(), required_columns.end()); + return true; + } + if (updated_columns.contains(dependency)) { to_set.insert(required_columns.begin(), required_columns.end()); @@ -357,7 +365,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( for (const auto & projection : getProjections()) { if (has_dependency(projection.name, ColumnDependency::PROJECTION)) - add_dependent_columns(projection.getRequiredColumns(), projections_columns); + add_dependent_columns(projection.getRequiredColumns(), projections_columns, true); } auto add_for_rows_ttl = [&](const auto & expression, auto & to_set) diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 4c6c2fff209..e1256032493 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -3,6 +3,8 @@ #include #include +#include +#include #include #include @@ -14,6 +16,7 @@ #include #include #include +#include #include #include @@ -146,6 +149,13 @@ StorageMaterializedView::StorageMaterializedView( if (point_to_itself_by_uuid || point_to_itself_by_name) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Materialized view {} cannot point to itself", table_id_.getFullTableName()); + if (query.refresh_strategy) + { + fixed_uuid = false; + refresher = RefreshTask::create(this, getContext(), *query.refresh_strategy); + refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; + } + if (!has_inner_table) { target_table_id = to_table_id; @@ -198,15 +208,6 @@ StorageMaterializedView::StorageMaterializedView( target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID(); } - - if (query.refresh_strategy) - { - fixed_uuid = false; - refresher = RefreshTask::create( - getContext(), - *query.refresh_strategy); - refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; - } } QueryProcessingStage::Enum StorageMaterializedView::getQueryProcessingStage( @@ -377,44 +378,71 @@ bool StorageMaterializedView::optimize( return storage_ptr->optimize(query, metadata_snapshot, partition, final, deduplicate, deduplicate_by_columns, cleanup, local_context); } -std::tuple> StorageMaterializedView::prepareRefresh() const +ContextMutablePtr StorageMaterializedView::createRefreshContext() const { auto refresh_context = 
getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext()); + refresh_context->setQueryKind(ClientInfo::QueryKind::INITIAL_QUERY); /// Generate a random query id. refresh_context->setCurrentQueryId(""); + /// TODO: Set view's definer as the current user in refresh_context, so that the correct user's + /// quotas and permissions apply for this query. + return refresh_context; +} - CurrentThread::QueryScope query_scope(refresh_context); - +std::shared_ptr StorageMaterializedView::prepareRefresh(bool append, ContextMutablePtr refresh_context, std::optional & out_temp_table_id) const +{ auto inner_table_id = getTargetTableId(); - auto new_table_name = ".tmp" + generateInnerTableName(getStorageID()); + StorageID target_table = inner_table_id; - auto db = DatabaseCatalog::instance().getDatabase(inner_table_id.database_name); + if (!append) + { + CurrentThread::QueryScope query_scope(refresh_context); - auto create_table_query = db->getCreateTableQuery(inner_table_id.table_name, getContext()); - auto & create_query = create_table_query->as(); - create_query.setTable(new_table_name); - create_query.setDatabase(db->getDatabaseName()); - create_query.create_or_replace = true; - create_query.replace_table = true; - create_query.uuid = UUIDHelpers::Nil; + auto db = DatabaseCatalog::instance().getDatabase(inner_table_id.database_name); + String db_name = db->getDatabaseName(); + auto new_table_name = ".tmp" + generateInnerTableName(getStorageID()); - InterpreterCreateQuery create_interpreter(create_table_query, refresh_context); - create_interpreter.setInternal(true); - create_interpreter.execute(); + auto create_table_query = db->getCreateTableQuery(inner_table_id.table_name, getContext()); + auto & create_query = create_table_query->as(); + create_query.setTable(new_table_name); + create_query.setDatabase(db->getDatabaseName()); + create_query.create_or_replace = true; + create_query.replace_table = true; + create_query.uuid = UUIDHelpers::Nil; - StorageID fresh_table = DatabaseCatalog::instance().getTable({create_query.getDatabase(), create_query.getTable()}, getContext())->getStorageID(); + InterpreterCreateQuery create_interpreter(create_table_query, refresh_context); + create_interpreter.setInternal(true); + create_interpreter.execute(); + + target_table = DatabaseCatalog::instance().getTable({db_name, new_table_name}, getContext())->getStorageID(); + out_temp_table_id = target_table; + } auto insert_query = std::make_shared(); insert_query->select = getInMemoryMetadataPtr()->getSelectQuery().select_query; - insert_query->setTable(fresh_table.table_name); - insert_query->setDatabase(fresh_table.database_name); - insert_query->table_id = fresh_table; + insert_query->setTable(target_table.table_name); + insert_query->setDatabase(target_table.database_name); + insert_query->table_id = target_table; - return {refresh_context, insert_query}; + Block header; + if (refresh_context->getSettingsRef().allow_experimental_analyzer) + header = InterpreterSelectQueryAnalyzer::getSampleBlock(insert_query->select, refresh_context); + else + header = InterpreterSelectWithUnionQuery(insert_query->select, refresh_context, SelectQueryOptions()).getSampleBlock(); + + auto columns = std::make_shared(','); + for (const String & name : header.getNames()) + columns->children.push_back(std::make_shared(name)); + insert_query->columns = std::move(columns); + + return insert_query; } StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context) { + /// Known problem: 
if the target table was ALTERed during refresh, this will effectively revert + /// the ALTER. + auto stale_table_id = getTargetTableId(); auto db = DatabaseCatalog::instance().getDatabase(stale_table_id.database_name); @@ -422,15 +450,40 @@ StorageID StorageMaterializedView::exchangeTargetTable(StorageID fresh_table, Co CurrentThread::QueryScope query_scope(refresh_context); - target_db->renameTable( - refresh_context, fresh_table.table_name, *db, stale_table_id.table_name, /*exchange=*/true, /*dictionary=*/false); + auto rename_query = std::make_shared(); + rename_query->exchange = true; + rename_query->addElement(fresh_table.database_name, fresh_table.table_name, stale_table_id.database_name, stale_table_id.table_name); + + InterpreterRenameQuery(rename_query, refresh_context).execute(); std::swap(stale_table_id.database_name, fresh_table.database_name); std::swap(stale_table_id.table_name, fresh_table.table_name); + setTargetTableId(std::move(fresh_table)); return stale_table_id; } +void StorageMaterializedView::dropTempTable(StorageID table_id, ContextMutablePtr refresh_context) +{ + CurrentThread::QueryScope query_scope(refresh_context); + + try + { + auto drop_query = std::make_shared(); + drop_query->setDatabase(table_id.database_name); + drop_query->setTable(table_id.table_name); + drop_query->kind = ASTDropQuery::Kind::Drop; + drop_query->if_exists = true; + drop_query->sync = false; + + InterpreterDropQuery(drop_query, refresh_context).execute(); + } + catch (...) + { + tryLogCurrentException(&Poco::Logger::get("StorageMaterializedView"), "Failed to drop temporary table after refresh"); + } +} + void StorageMaterializedView::alter( const AlterCommands & params, ContextPtr local_context, @@ -530,25 +583,11 @@ void StorageMaterializedView::renameInMemory(const StorageID & new_table_id) { auto new_target_table_name = generateInnerTableName(new_table_id); - ASTRenameQuery::Elements rename_elements; assert(inner_table_id.database_name == old_table_id.database_name); - ASTRenameQuery::Element elem - { - ASTRenameQuery::Table - { - inner_table_id.database_name.empty() ? nullptr : std::make_shared(inner_table_id.database_name), - std::make_shared(inner_table_id.table_name) - }, - ASTRenameQuery::Table - { - new_table_id.database_name.empty() ? 
nullptr : std::make_shared<ASTIdentifier>(new_table_id.database_name), - std::make_shared<ASTIdentifier>(new_target_table_name) - } - }; - rename_elements.emplace_back(std::move(elem)); + auto rename = std::make_shared<ASTRenameQuery>(); + rename->addElement(inner_table_id.database_name, inner_table_id.table_name, new_table_id.database_name, new_target_table_name); - auto rename = std::make_shared<ASTRenameQuery>(std::move(rename_elements)); InterpreterRenameQuery(rename, getContext()).execute(); updateTargetTableId(new_table_id.database_name, new_target_table_name); } @@ -576,7 +615,7 @@ void StorageMaterializedView::startup() if (refresher) { - refresher->initializeAndStart(std::static_pointer_cast<StorageMaterializedView>(shared_from_this())); + refresher->initializeAndStart(); if (refresh_on_start) refresher->run(); diff --git a/src/Storages/StorageMaterializedView.h b/src/Storages/StorageMaterializedView.h index 5ecd2ec3819..a09ee07b3f6 100644 --- a/src/Storages/StorageMaterializedView.h +++ b/src/Storages/StorageMaterializedView.h @@ -5,7 +5,7 @@ #include #include -#include +#include namespace DB { @@ -106,7 +106,7 @@ private: /// Will be initialized in constructor StorageID target_table_id = StorageID::createEmpty(); - RefreshTaskHolder refresher; + OwnedRefreshTask refresher; bool refresh_on_start = false; bool has_inner_table = false; @@ -119,10 +119,14 @@ private: void checkStatementCanBeForwarded() const; - /// Prepare to refresh a refreshable materialized view: create query context, create temporary - /// table, form the insert-select query. - std::tuple<ContextMutablePtr, std::shared_ptr<ASTInsertQuery>> prepareRefresh() const; + ContextMutablePtr createRefreshContext() const; + /// Prepare to refresh a refreshable materialized view: create temporary table and form the + /// insert-select query. + /// out_temp_table_id may be assigned before throwing an exception, in which case the caller + /// must drop the temp table before rethrowing. + std::shared_ptr<ASTInsertQuery> prepareRefresh(bool append, ContextMutablePtr refresh_context, std::optional<StorageID> & out_temp_table_id) const; StorageID exchangeTargetTable(StorageID fresh_table, ContextPtr refresh_context); + void dropTempTable(StorageID table, ContextMutablePtr refresh_context); void setTargetTableId(StorageID id); void updateTargetTableId(std::optional<String> database_name, std::optional<String> table_name); diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 78dbb72c199..f7701a2aab8 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -895,7 +895,7 @@ void StorageMergeTree::loadDeduplicationLog() std::string path = fs::path(relative_data_path) / "deduplication_logs"; /// If either there is already a deduplication log, or we will be able to use it.
- if (disk->exists(path) || !disk->isReadOnly()) + if (!disk->isReadOnly() || disk->exists(path)) { deduplication_log = std::make_unique(path, settings->non_replicated_deduplication_window, format_version, disk); deduplication_log->load(); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a3c1ab7cdff..80a7e862f72 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -800,7 +800,8 @@ void StorageReplicatedMergeTree::createNewZooKeeperNodes() { auto res = future.get(); if (res.error != Coordination::Error::ZOK && res.error != Coordination::Error::ZNODEEXISTS) - throw Coordination::Exception(res.error, "Failed to create new nodes {} at {}", res.path_created, zookeeper_path); + throw Coordination::Exception(res.error, "Failed to create new nodes {} at {} with error {}", + res.path_created, zookeeper_path, Coordination::errorMessage(res.error)); } } @@ -5193,17 +5194,16 @@ void StorageReplicatedMergeTree::startupImpl(bool from_attach_thread) startBeingLeader(); - /// Activate replica in a separate thread if we are not calling from attach thread - restarting_thread.start(/*schedule=*/!from_attach_thread); - if (from_attach_thread) { LOG_TRACE(log, "Trying to startup table from right now"); - /// Try activating replica in current thread. + /// Try activating replica in the current thread. restarting_thread.run(); + restarting_thread.start(false); } else { + restarting_thread.start(true); /// Wait while restarting_thread finishing initialization. /// NOTE It does not mean that replication is actually started after receiving this event. /// It only means that an attempt to startup replication was made. @@ -5224,7 +5224,7 @@ void StorageReplicatedMergeTree::startupImpl(bool from_attach_thread) session_expired_callback_handler = EventNotifier::instance().subscribe(Coordination::Error::ZSESSIONEXPIRED, [this]() { LOG_TEST(log, "Received event for expired session. Waking up restarting thread"); - restarting_thread.start(); + restarting_thread.start(true); }); startBackgroundMovesIfNeeded(); @@ -5293,7 +5293,6 @@ void StorageReplicatedMergeTree::flushAndPrepareForShutdown() LOG_TRACE(log, "The attach thread is shutdown"); } - restarting_thread.shutdown(/* part_of_full_shutdown */true); /// Explicitly set the event, because the restarting thread will not set it again startup_event.set(); @@ -5704,7 +5703,8 @@ std::optional StorageReplicatedMergeTree::distributedWriteFromClu { auto connection = std::make_shared( node.host_name, node.port, query_context->getGlobalContext()->getCurrentDatabase(), - node.user, node.password, SSHKey(), /*jwt*/"", node.quota_key, node.cluster, node.cluster_secret, + node.user, node.password, node.proto_send_chunked, node.proto_recv_chunked, + SSHKey(), /*jwt*/"", node.quota_key, node.cluster, node.cluster_secret, "ParallelInsertSelectInititiator", node.compression, node.secure @@ -6340,7 +6340,7 @@ void StorageReplicatedMergeTree::alter( "Metadata on replica is not up to date with common metadata in Zookeeper. " "It means that this replica still not applied some of previous alters." " Probably too many alters executing concurrently (highly not recommended). " - "You can retry this error"); + "You can retry the query"); /// Cannot retry automatically, because some zookeeper ops were lost on the first attempt. Will retry on DDLWorker-level. 
if (query_context->getZooKeeperMetadataTransaction()) diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 35b9c0008c6..67dfe3bfe86 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -457,6 +457,7 @@ const char * auto_contributors[] { "Gleb-Tretyakov", "GoGoWen2021", "Gosha Letov", + "Graham Campbell", "Gregory", "Grigorii Sokolik", "Grigory", @@ -472,6 +473,7 @@ const char * auto_contributors[] { "Habibullah Oladepo", "HaiBo Li", "Hakob Saghatelyan", + "Halersson Paris", "Hamoon", "Han Fei", "Han Shukai", @@ -541,6 +543,7 @@ const char * auto_contributors[] { "JackyWoo", "Jacob Hayes", "Jacob Herrington", + "Jacob Reckhard", "Jai Jhala", "Jake Bamrah", "Jake Liu", @@ -661,6 +664,7 @@ const char * auto_contributors[] { "LaurieLY", "Lee sungju", "Lemore", + "Lennard Eijsackers", "Leonardo Cecchi", "Leonardo Maciel", "Leonid Krylov", @@ -922,6 +926,7 @@ const char * auto_contributors[] { "Pervakov Grigorii", "Pervakov Grigory", "Peter", + "Peter Nguyen", "Petr Vasilev", "Pham Anh Tuan", "Philip Hallstrom", @@ -981,6 +986,7 @@ const char * auto_contributors[] { "Ronald Bradford", "Rory Crispin", "Roy Bellingan", + "Ruihang Xia", "Ruslan", "Ruslan Mardugalliamov", "Ruslan Savchenko", @@ -1000,9 +1006,11 @@ const char * auto_contributors[] { "Sami Kerola", "Samuel Chou", "Samuel Colvin", + "Samuele Guerrini", "San", "Sanjam Panda", "Sariel", + "Sasha Sheikin", "Saulius Valatka", "Sean Haynes", "Sean Lafferty", @@ -1202,6 +1210,7 @@ const char * auto_contributors[] { "Vladimir Makarov", "Vladimir Mihailenco", "Vladimir Smirnov", + "Vladimir Varankin", "Vladislav Rassokhin", "Vladislav Smirnov", "Vladislav V", @@ -1275,6 +1284,7 @@ const char * auto_contributors[] { "Zhichun Wu", "Zhiguo Zhou", "Zhipeng", + "Zhukova, Maria", "Zhuo Qiu", "Zijie Lu", "Zimu Li", @@ -1502,6 +1512,7 @@ const char * auto_contributors[] { "hchen9", "hcz", "hdhoang", + "heguangnan", "heleihelei", "helifu", "hendrik-m", @@ -1572,6 +1583,7 @@ const char * auto_contributors[] { "kevinyhzou", "kgurjev", "khamadiev", + "khodyrevyurii", "kigerzhang", "kirillikoff", "kmeaw", @@ -1787,6 +1799,7 @@ const char * auto_contributors[] { "ruslandoga", "ryzuo", "s-kat", + "sakulali", "sanjam", "santaux", "santrancisco", @@ -1804,6 +1817,7 @@ const char * auto_contributors[] { "shabroo", "shangshujie", "shedx", + "shiyer7474", "shuai-xu", "shuchaome", "shuyang", @@ -1901,6 +1915,7 @@ const char * auto_contributors[] { "wzl", "xPoSx", "xbthink", + "xc0derx", "xiao", "xiaolei565", "xiebin", @@ -1964,6 +1979,7 @@ const char * auto_contributors[] { "zkun", "zlx19950903", "zombee0", + "zoomxi", "zvonand", "zvrr", "zvvr", diff --git a/src/Storages/System/StorageSystemFilesystemCache.cpp b/src/Storages/System/StorageSystemFilesystemCache.cpp index cfb388bc232..0e972d8411b 100644 --- a/src/Storages/System/StorageSystemFilesystemCache.cpp +++ b/src/Storages/System/StorageSystemFilesystemCache.cpp @@ -47,6 +47,9 @@ void StorageSystemFilesystemCache::fillData(MutableColumns & res_columns, Contex for (const auto & [cache_name, cache_data] : caches) { const auto & cache = cache_data->cache; + if (!cache->isInitialized()) + continue; + cache->iterate([&](const FileSegment::Info & file_segment) { size_t i = 0; diff --git a/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp b/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp index 8915032baf7..c6bba6b8598 
100644 --- a/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp +++ b/src/Storages/System/StorageSystemFilesystemCacheSettings.cpp @@ -21,6 +21,7 @@ ColumnsDescription StorageSystemFilesystemCacheSettings::getColumnsDescription() {"path", std::make_shared(), "Cache directory"}, {"max_size", std::make_shared(), "Cache size limit by the number of bytes"}, {"max_elements", std::make_shared(), "Cache size limit by the number of elements"}, + {"is_initialized", std::make_shared(), "Whether the cache is initialized and ready to be used"}, {"current_size", std::make_shared(), "Current cache size by the number of bytes"}, {"current_elements", std::make_shared(), "Current cache size by the number of elements"}, {"max_file_segment_size", std::make_shared(), "Maximum allowed file segment size"}, @@ -56,6 +57,7 @@ void StorageSystemFilesystemCacheSettings::fillData( res_columns[i++]->insert(settings.base_path); res_columns[i++]->insert(settings.max_size); res_columns[i++]->insert(settings.max_elements); + res_columns[i++]->insert(cache->isInitialized()); res_columns[i++]->insert(cache->getUsedCacheSize()); res_columns[i++]->insert(cache->getFileSegmentsNum()); res_columns[i++]->insert(settings.max_file_segment_size); diff --git a/src/Storages/System/StorageSystemOne.cpp b/src/Storages/System/StorageSystemOne.cpp index 936d55e61a0..70377715dc3 100644 --- a/src/Storages/System/StorageSystemOne.cpp +++ b/src/Storages/System/StorageSystemOne.cpp @@ -41,7 +41,10 @@ Pipe StorageSystemOne::read( auto column = DataTypeUInt8().createColumnConst(1, 0u)->convertToFullColumnIfConst(); Chunk chunk({ std::move(column) }, 1); - return Pipe(std::make_shared(std::move(header), std::move(chunk))); + auto source = std::make_shared(std::move(header), std::move(chunk)); + source->addTotalRowsApprox(1); + + return Pipe(source); } diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 061201017a7..6e0dab1468d 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -34,8 +34,9 @@ ColumnsDescription StorageSystemViewRefreshes::getColumnsDescription() "If status = 'WaitingForDependencies', a refresh is ready to start as soon as these dependencies are fulfilled." }, {"exception", std::make_shared(), - "if last_refresh_result = 'Exception', i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace." + "if last_refresh_result = 'Error', i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace." 
}, + {"retry", std::make_shared(), "How many failed attempts there were so far, for the current refresh."}, {"refresh_count", std::make_shared(), "Number of successful refreshes since last server restart or table creation."}, {"progress", std::make_shared(), "Progress of the current refresh, between 0 and 1."}, {"elapsed", std::make_shared(), "The amount of nanoseconds the current refresh took."}, @@ -88,6 +89,7 @@ void StorageSystemViewRefreshes::fillData( res_columns[i++]->insert(Array(deps)); res_columns[i++]->insert(refresh.exception_message); + res_columns[i++]->insert(refresh.retry); res_columns[i++]->insert(refresh.refresh_count); res_columns[i++]->insert(Float64(refresh.progress.read_rows) / refresh.progress.total_rows_to_read); res_columns[i++]->insert(refresh.progress.elapsed_ns / 1e9); diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index b2cf1b4675e..94279d1bf6d 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -1,5 +1,4 @@ #include -#include #include diff --git a/src/TableFunctions/TableFunctionRedis.cpp b/src/TableFunctions/TableFunctionRedis.cpp index f87ba6d1c6d..aca751c2840 100644 --- a/src/TableFunctions/TableFunctionRedis.cpp +++ b/src/TableFunctions/TableFunctionRedis.cpp @@ -15,7 +15,6 @@ #include #include -#include namespace DB diff --git a/tests/analyzer_tech_debt.txt b/tests/analyzer_tech_debt.txt index bd92465e1aa..c8edbdc5932 100644 --- a/tests/analyzer_tech_debt.txt +++ b/tests/analyzer_tech_debt.txt @@ -1,4 +1,3 @@ 01624_soft_constraints -02354_vector_search_queries # Check after ConstantNode refactoring 02944_variant_as_common_type diff --git a/tests/ci/.mypy.ini b/tests/ci/.mypy.ini index f12d27979ce..ecb4aef87dd 100644 --- a/tests/ci/.mypy.ini +++ b/tests/ci/.mypy.ini @@ -15,4 +15,5 @@ warn_return_any = True no_implicit_reexport = True strict_equality = True extra_checks = True -ignore_missing_imports = True \ No newline at end of file +ignore_missing_imports = True +logging-fstring-interpolation = False \ No newline at end of file diff --git a/tests/ci/artifactory.py b/tests/ci/artifactory.py index f3d7d24f717..9457fa32ad3 100644 --- a/tests/ci/artifactory.py +++ b/tests/ci/artifactory.py @@ -143,6 +143,8 @@ class DebianArtifactory: print(f" {cmd}") Shell.check(cmd, strict=True) Shell.check("sync") + time.sleep(10) + Shell.check(f"lsof +D {R2MountPoint.MOUNT_POINT}", verbose=True) def test_packages(self): Shell.check("docker pull ubuntu:latest", strict=True) diff --git a/tests/ci/auto_release.py b/tests/ci/auto_release.py index 3cc88634004..89714b2fb4b 100644 --- a/tests/ci/auto_release.py +++ b/tests/ci/auto_release.py @@ -46,6 +46,7 @@ def parse_args(): MAX_NUMBER_OF_COMMITS_TO_CONSIDER_FOR_RELEASE = 5 AUTORELEASE_INFO_FILE = "/tmp/autorelease_info.json" +AUTORELEASE_MATRIX_PARAMS = "/tmp/autorelease_params.json" @dataclasses.dataclass @@ -74,6 +75,14 @@ class AutoReleaseInfo: with open(AUTORELEASE_INFO_FILE, "w", encoding="utf-8") as f: print(json.dumps(dataclasses.asdict(self), indent=2), file=f) + # dump a file for the GH Actions matrix, similar to the file above but with not-ready release branches dropped + params = dataclasses.asdict(self) + params["releases"] = [ + release for release in params["releases"] if release["ready"] + ] + with open(AUTORELEASE_MATRIX_PARAMS, "w", encoding="utf-8") as f: + print(json.dumps(params, indent=2), file=f) + @staticmethod def from_file() -> "AutoReleaseInfo": with open(AUTORELEASE_INFO_FILE, "r",
encoding="utf-8") as json_file: @@ -102,7 +111,6 @@ def _prepare(token): refs = list(repo.get_git_matching_refs(f"tags/v{pr.head.ref}")) assert refs - refs.sort(key=lambda ref: ref.ref) latest_release_tag_ref = refs[-1] latest_release_tag = repo.get_git_tag(latest_release_tag_ref.object.sha) @@ -110,6 +118,10 @@ def _prepare(token): f"git rev-list --first-parent {latest_release_tag.tag}..origin/{pr.head.ref}", ).split("\n") commit_num = len(commits) + if latest_release_tag.tag.endswith("new"): + print("It's a new release branch - skip auto release for it") + continue + print( f"Previous release [{latest_release_tag.tag}] was [{commit_num}] commits ago, date [{latest_release_tag.tagger.date}]" ) @@ -133,16 +145,33 @@ def _prepare(token): commits_to_branch_head += 1 continue - commit_ci_status = CI.GH.get_commit_status_by_name( - token=token, - commit_sha=commit, - status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"), - ) + # TODO: switch to check if CI is entirely green + statuses = [ + CI.GH.get_commit_status_by_name( + token=token, + commit_sha=commit, + # handle old name for old releases + status_name=(CI.JobNames.BUILD_CHECK, "ClickHouse build check"), + ), + CI.GH.get_commit_status_by_name( + token=token, + commit_sha=commit, + # handle old name for old releases + status_name=CI.JobNames.STATELESS_TEST_RELEASE, + ), + CI.GH.get_commit_status_by_name( + token=token, + commit_sha=commit, + # handle old name for old releases + status_name=CI.JobNames.STATEFUL_TEST_RELEASE, + ), + ] commit_sha = commit - if commit_ci_status == SUCCESS: + if any(status == SUCCESS for status in statuses): + commit_ci_status = SUCCESS break - print(f"CI status [{commit_ci_status}] - skip") + print(f"CI status [{statuses}] - skip") commits_to_branch_head += 1 ready = False diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index 39e426945d3..554ba339892 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -19,7 +19,6 @@ from env_helper import TEMP_PATH from git_helper import git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository from s3_helper import S3Helper -from get_robot_token import get_best_robot_token from ci_utils import Shell from version_helper import ( FILE_WITH_VERSION_PATH, @@ -115,7 +114,6 @@ def get_descriptions(prs: PullRequests) -> Dict[str, List[Description]]: # pylint: enable=protected-access if repo_name not in repos: repos[repo_name] = pr.base.repo - in_changelog = False merge_commit = pr.merge_commit_sha if merge_commit is None: logging.warning("PR %s does not have merge-commit, skipping", pr.number) @@ -173,7 +171,6 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--gh-user-or-token", help="user name or GH token to authenticate", - default=get_best_robot_token(), ) parser.add_argument( "--gh-password", @@ -291,7 +288,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri # Normalize bug fixes if ( re.match( - r"(?i)bug\Wfix", + r".*(?i)bug\Wfix", category, ) # Map "Critical Bug Fix" to "Bug fix" category for changelog diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 49b597333dc..a9ae078b449 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -50,7 +50,6 @@ from github_helper import GitHub from pr_info import PRInfo from report import ( ERROR, - FAILURE, PENDING, SUCCESS, BuildResult, @@ -62,11 +61,11 @@ from report import ( FAIL, ) from s3_helper import S3Helper -from stopwatch import Stopwatch from tee_popen import TeePopen from ci_cache import CiCache from 
ci_settings import CiSettings from ci_buddy import CIBuddy +from stopwatch import Stopwatch from version_helper import get_version_from_repo # pylint: disable=too-many-lines @@ -370,8 +369,8 @@ def _pre_action(s3, job_name, batch, indata, pr_info): # skip_status = SUCCESS already there GH.print_in_group("Commit Status Data", job_status) - # create pre report - jr = JobReport.create_pre_report(status=skip_status, job_skipped=to_be_skipped) + # create dummy report + jr = JobReport.create_dummy(status=skip_status, job_skipped=to_be_skipped) jr.dump() if not to_be_skipped: @@ -990,19 +989,21 @@ def _run_test(job_name: str, run_command: str) -> int: stopwatch = Stopwatch() job_log = Path(TEMP_PATH) / "job_log.txt" with TeePopen(run_command, job_log, env, timeout) as process: + print(f"Job process started, pid [{process.process.pid}]") retcode = process.wait() if retcode != 0: print(f"Run action failed for: [{job_name}] with exit code [{retcode}]") - if timeout and process.timeout_exceeded: - print(f"Timeout {timeout} exceeded, dumping the job report") - JobReport( - status=FAILURE, - description=f"Timeout {timeout} exceeded", - test_results=[TestResult.create_check_timeout_expired(timeout)], - start_time=stopwatch.start_time_str, - duration=stopwatch.duration_seconds, - additional_files=[job_log], - ).dump() + if process.timeout_exceeded: + print(f"Job timed out: [{job_name}] exit code [{retcode}]") + assert JobReport.exist(), "JobReport real or dummy must be present" + jr = JobReport.load() + if jr.dummy: + print( + f"ERROR: Run action failed with timeout and did not generate JobReport - update dummy report with execution time" + ) + jr.test_results = [TestResult.create_check_timeout_expired()] + jr.duration = stopwatch.duration_seconds + jr.additional_files += [job_log] print(f"Run action done for: [{job_name}]") return retcode @@ -1205,7 +1206,7 @@ def main() -> int: job_report ), "BUG. 
There must be job report either real report, or pre-report if job was killed" error_description = "" - if not job_report.pre_report: + if not job_report.dummy: # it's a real job report ch_helper = ClickHouseHelper() check_url = "" @@ -1329,10 +1330,20 @@ def main() -> int: if CI.is_test_job(args.job_name): gh = GitHub(get_best_robot_token(), per_page=100) commit = get_commit(gh, pr_info.sha) + check_url = "" + if job_report.test_results or job_report.additional_files: + check_url = upload_result_helper.upload_results( + s3, + pr_info.number, + pr_info.sha, + job_report.test_results, + job_report.additional_files, + job_report.check_name or _get_ext_check_name(args.job_name), + ) post_commit_status( commit, ERROR, - "", + check_url, "Error: " + error_description, _get_ext_check_name(args.job_name), pr_info, diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 7a19eb6f827..4bff1374ec0 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -94,7 +94,8 @@ class CI: package_type="deb", static_binary_name="aarch64", additional_pkgs=True, - ) + ), + runner_type=Runners.BUILDER_ARM, ), BuildNames.PACKAGE_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( @@ -162,6 +163,7 @@ class CI: tidy=True, comment="clang-tidy is used for static analysis", ), + timeout=14400, ), BuildNames.BINARY_DARWIN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( @@ -315,6 +317,7 @@ class CI: JobNames.STATEFUL_TEST_PARALLEL_REPL_TSAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], random_bucket="parrepl_with_sanitizer", + timeout=3600, ), JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 @@ -342,17 +345,17 @@ class CI: runner_type=Runners.FUNC_TESTER_ARM, ), JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 + required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=2 ), JobNames.STATELESS_TEST_S3_DEBUG: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=2 + required_builds=[BuildNames.PACKAGE_DEBUG], num_batches=1 ), JobNames.STATELESS_TEST_AZURE_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=3, release_only=True ), JobNames.STATELESS_TEST_S3_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], - num_batches=4, + num_batches=3, ), JobNames.STRESS_TEST_DEBUG: CommonJobConfigs.STRESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_DEBUG], @@ -397,10 +400,14 @@ class CI: required_builds=[BuildNames.PACKAGE_DEBUG], pr_only=True ), JobNames.INTEGRATION_TEST_ASAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], release_only=True, num_batches=4 + required_builds=[BuildNames.PACKAGE_ASAN], + release_only=True, + num_batches=4, + timeout=10800, ), JobNames.INTEGRATION_TEST_ASAN_OLD_ANALYZER: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ASAN], num_batches=6 + required_builds=[BuildNames.PACKAGE_ASAN], + num_batches=6, ), JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6 @@ -531,7 +538,10 @@ class CI: JobNames.FAST_TEST: JobConfig( pr_only=True, digest=DigestConfig( - 
include_paths=["./tests/queries/0_stateless/"], + include_paths=[ + "./tests/queries/0_stateless/", + "./tests/docker_scripts/", + ], exclude_files=[".md"], docker=["clickhouse/fasttest"], ), diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index 48847b0d7a6..1cdb3f1487e 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -57,6 +57,7 @@ class Runners(metaclass=WithIter): """ BUILDER = "builder" + BUILDER_ARM = "builder-aarch64" STYLE_CHECKER = "style-checker" STYLE_CHECKER_ARM = "style-checker-aarch64" FUNC_TESTER = "func-tester" @@ -331,7 +332,7 @@ class JobConfig: # will be triggered for the job if omitted in CI workflow yml run_command: str = "" # job timeout, seconds - timeout: Optional[int] = None + timeout: int = 7200 # sets number of batches for a multi-batch job num_batches: int = 1 # label that enables job in CI, if set digest isn't used @@ -414,13 +415,13 @@ class CommonJobConfigs: "./tests/clickhouse-test", "./tests/config", "./tests/*.txt", + "./tests/docker_scripts/", ], exclude_files=[".md"], docker=["clickhouse/stateless-test"], ), run_command='functional_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, - timeout=9000, ) STATEFUL_TEST = JobConfig( job_name_keyword="stateful", @@ -431,6 +432,7 @@ class CommonJobConfigs: "./tests/clickhouse-test", "./tests/config", "./tests/*.txt", + "./tests/docker_scripts/", ], exclude_files=[".md"], docker=["clickhouse/stateful-test"], @@ -448,6 +450,7 @@ class CommonJobConfigs: "./tests/clickhouse-test", "./tests/config", "./tests/*.txt", + "./tests/docker_scripts/", ], exclude_files=[".md"], docker=["clickhouse/stress-test"], @@ -459,12 +462,13 @@ class CommonJobConfigs: UPGRADE_TEST = JobConfig( job_name_keyword="upgrade", digest=DigestConfig( - include_paths=["./tests/ci/upgrade_check.py"], + include_paths=["./tests/ci/upgrade_check.py", "./tests/docker_scripts/"], exclude_files=[".md"], - docker=["clickhouse/upgrade-check"], + docker=["clickhouse/stress-test"], ), run_command="upgrade_check.py", runner_type=Runners.STRESS_TESTER, + timeout=3600, ) INTEGRATION_TEST = JobConfig( job_name_keyword="integration", diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index d807f5be09f..a4c0977f47c 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -102,21 +102,29 @@ class GH: assert len(commit_sha) == 40 assert Utils.is_hex(commit_sha) assert not Utils.is_hex(token) - url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses?per_page={200}" + + url = f"https://api.github.com/repos/{Envs.GITHUB_REPOSITORY}/commits/{commit_sha}/statuses" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json", } - response = requests.get(url, headers=headers, timeout=5) if isinstance(status_name, str): status_name = (status_name,) - if response.status_code == 200: - assert "next" not in response.links, "Response truncated" - statuses = response.json() - for status in statuses: - if status["context"] in status_name: - return status["state"] # type: ignore + + while url: + response = requests.get(url, headers=headers, timeout=5) + if response.status_code == 200: + statuses = response.json() + for status in statuses: + if status["context"] in status_name: + return status["state"] # type: ignore + + # Check if there is a next page + url = response.links.get("next", {}).get("url") + else: + break + return "" @staticmethod @@ -167,6 +175,11 @@ class GH: latest_branch = Shell.get_output( 'gh pr list --label release --repo 
ClickHouse/ClickHouse --search "sort:created" -L1 --json headRefName' ) + if latest_branch: + latest_branch = json.loads(latest_branch)[0]["headRefName"] + print( + f"Latest branch [{latest_branch}], release branch [{branch}], release latest [{latest_branch == branch}]" + ) return latest_branch == branch diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 27eba273ce0..68268b033fe 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -61,6 +61,7 @@ class ReleaseContextManager: # create initial release info self.release_info = ReleaseInfo( release_branch="NA", + release_type="NA", commit_sha=args.ref, release_tag="NA", version="NA", @@ -93,6 +94,7 @@ class ReleaseContextManager: @dataclasses.dataclass class ReleaseInfo: version: str + release_type: str release_tag: str release_branch: str commit_sha: str @@ -131,7 +133,7 @@ class ReleaseInfo: return self def prepare( - self, commit_ref: str, release_type: str, skip_tag_check: bool + self, commit_ref: str, release_type: str, _skip_tag_check: bool ) -> "ReleaseInfo": version = None release_branch = None @@ -143,17 +145,18 @@ class ReleaseInfo: assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch - Shell.check( - f"git merge-base --is-ancestor {commit_ref} origin/master", - strict=True, - verbose=True, - ) + if commit_ref != "master": + Shell.check( + f"git merge-base --is-ancestor {commit_ref} origin/master", + strict=True, + verbose=True, + ) with checkout(commit_ref): commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) - release_branch = "master" + release_branch = f"{version.major}.{version.minor}" expected_prev_tag = f"v{version.major}.{version.minor}.1.1-new" version.bump().with_description(VersionType.NEW) assert ( @@ -204,10 +207,11 @@ class ReleaseInfo: expected_tag_prefix ) and git.latest_tag.endswith(expected_tag_suffix): pass - elif not skip_tag_check: - assert ( - False - ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" + # TODO: uncomment and check with dry-run + # elif not skip_tag_check: + # assert ( + # False + # ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" previous_release_sha = Shell.get_output_or_raise( f"git rev-list -n1 {previous_release_tag}" @@ -238,6 +242,7 @@ class ReleaseInfo: self.release_progress = ReleaseProgress.STARTED self.progress_status = ReleaseProgressDescription.OK self.latest = latest_release + self.release_type = release_type return self def push_release_tag(self, dry_run: bool) -> None: @@ -262,16 +267,15 @@ class ReleaseInfo: @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: cmd = f"gh api repos/{CI.Envs.GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" - Shell.check(cmd, dry_run=dry_run, strict=True) + res = Shell.check(cmd, dry_run=dry_run, verbose=True) + if not res: + # not a critical error - do not fail. 
branch might be created already (recovery case) + print("WARNING: failed to create backport labels for the new branch") def push_new_release_branch(self, dry_run: bool) -> None: - assert ( - self.release_branch == "master" - ), "New release branch can be created only for release type [new]" git = Git() version = get_version_from_repo(git=git) - new_release_branch = f"{version.major}.{version.minor}" - stable_release_type = version.get_stable_release_type() + new_release_branch = self.release_branch version_after_release = copy(version) version_after_release.bump() assert ( @@ -285,11 +289,8 @@ class ReleaseInfo: print( f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]" ) - with checkout(self.release_branch): + with checkout("master"): with checkout_new(new_release_branch): - pr_labels = f"--label {CI.Labels.RELEASE}" - if stable_release_type == VersionType.LTS: - pr_labels += f" --label {CI.Labels.RELEASE_LTS}" cmd_push_branch = ( f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" ) @@ -302,67 +303,108 @@ class ReleaseInfo: ReleaseInfo._create_gh_label( f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) - Shell.check( - f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' - --head {new_release_branch} {pr_labels} - --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' - """, - dry_run=dry_run, - strict=True, - verbose=True, - ) def get_version_bump_branch(self): return f"bump_version_{self.version}" def update_version_and_contributors_list(self, dry_run: bool) -> None: - # Bump version, update contributors list, create PR - branch_upd_version_contributors = self.get_version_bump_branch() + # Bump version, update contributors list, create on release branch with checkout(self.commit_sha): git = Git() version = get_version_from_repo(git=git) - if self.release_branch == "master": + if self.release_type == "patch": + assert ( + version.string == self.version + ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" + version.bump_patch() + else: + version.reset_tweak() + version.with_description(version.get_stable_release_type()) + + with checkout(self.release_branch): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push" + Shell.check( + cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + + # TODO: move to new GH step? 
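Note: the bump_patch() and reset_tweak() helpers used in the block above are added to tests/ci/version_helper.py further down in this diff. A minimal sketch of how the numeric version parts move for the two release types, with a made-up starting version and a toy class, not the real ClickHouseVersion:

```python
# Toy model of the two helpers added in version_helper.py below; the class and
# the starting values are illustrative only.
class ToyVersion:
    def __init__(self, major, minor, patch, tweak, revision):
        self.major, self.minor = major, minor
        self.patch, self.tweak, self.revision = patch, tweak, revision

    def bump_patch(self):  # "patch" release: next patch version on the branch
        self.revision += 1
        self.patch += 1
        self.tweak = 1
        return self

    def reset_tweak(self):  # "new" release: keep major.minor.patch, reset the tweak
        self.tweak = 1
        return self

    def __str__(self):
        return f"{self.major}.{self.minor}.{self.patch}.{self.tweak}"

print(ToyVersion(24, 8, 3, 17, 54500).bump_patch())   # 24.8.4.1
print(ToyVersion(24, 8, 3, 17, 54500).reset_tweak())  # 24.8.3.1
```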
+ if self.release_type == "new": + print("Update version on master branch") + branch_upd_version_contributors = self.get_version_bump_branch() + with checkout(self.commit_sha): + git = Git() + version = get_version_from_repo(git=git) version.bump() version.with_description(VersionType.TESTING) - else: - version.with_description(version.get_stable_release_type()) - assert ( - version.string == self.version - ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" - with checkout(self.release_branch): - with checkout_new(branch_upd_version_contributors): - update_cmake_version(version) - update_contributors(raise_error=True) - cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" - cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" - actor = os.getenv("GITHUB_ACTOR", "") or "me" - body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" - cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body \"{body}\" --assignee {actor}" + with checkout("master"): + with checkout_new(branch_upd_version_contributors): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" + actor = os.getenv("GITHUB_ACTOR", "") or "me" + body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" + cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base master --body \"{body}\" --assignee {actor}" + Shell.check( + cmd_commit_version_upd, + strict=True, + dry_run=dry_run, + verbose=True, + ) + Shell.check( + cmd_push_branch, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check( + cmd_create_pr, strict=True, dry_run=dry_run, verbose=True + ) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + self.version_bump_pr = "dry-run" + else: + self.version_bump_pr = GH.get_pr_url_by_branch( + branch=branch_upd_version_contributors + ) + + # TODO: move to new GH step? + print("Create Release PR") + with checkout(self.release_branch): + pr_labels = f"--label {CI.Labels.RELEASE}" + if version.get_stable_release_type() == VersionType.LTS: + pr_labels += f" --label {CI.Labels.RELEASE_LTS}" Shell.check( - cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {self.release_branch}' \ + --head {self.release_branch} {pr_labels} \ + --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. 
Do not perform any changes with it.'""", + dry_run=dry_run, + strict=True, + verbose=True, ) - Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) - Shell.check(cmd_create_pr, strict=True, dry_run=dry_run, verbose=True) - if dry_run: - Shell.check( - f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - Shell.check( - f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - self.version_bump_pr = "dry-run" - else: - self.version_bump_pr = GH.get_pr_url_by_branch( - branch=branch_upd_version_contributors - ) def get_change_log_branch(self): return f"auto/{self.release_tag}" def update_release_info(self, dry_run: bool) -> "ReleaseInfo": - if self.release_branch != "master": + if self.release_type == "patch": if not self.changelog_pr: branch = self.get_change_log_branch() if not dry_run: @@ -371,21 +413,22 @@ class ReleaseInfo: url = "dry-run" print(f"ChangeLog PR url [{url}]") self.changelog_pr = url - - if not self.version_bump_pr: - branch = self.get_version_bump_branch() - if not dry_run: - url = GH.get_pr_url_by_branch(branch=branch) - else: - url = "dry-run" - print(f"Version bump PR url [{url}]") - self.version_bump_pr = url - - self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" - print(f"Release url [{self.release_url}]") - self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" + else: + # new release branch - find version bump pr on a master branch + branch = self.get_version_bump_branch() + if not dry_run: + url = GH.get_pr_url_by_branch(branch=branch) + else: + url = "dry-run" + print(f"Version bump PR url [{url}]") + self.version_bump_pr = url + + self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + print(f"Release url [{self.release_url}]") + self.dump() + return self def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: @@ -410,35 +453,40 @@ class ReleaseInfo: def merge_prs(self, dry_run: bool) -> None: repo = CI.Envs.GITHUB_REPOSITORY - assert self.version_bump_pr - if dry_run: - version_bump_pr_num = 12345 - else: - version_bump_pr_num = int(self.version_bump_pr.split("/")[-1]) - print("Merging Version bump PR") - res_1 = Shell.check( - f"gh pr merge {version_bump_pr_num} --repo {repo} --merge --auto", - verbose=True, - dry_run=dry_run, - ) - - res_2 = True - if not self.release_tag.endswith("-new"): + if self.release_type == "patch": assert self.changelog_pr print("Merging ChangeLog PR") if dry_run: changelog_pr_num = 23456 else: changelog_pr_num = int(self.changelog_pr.split("/")[-1]) - res_2 = Shell.check( + res = Shell.check( f"gh pr merge {changelog_pr_num} --repo {repo} --merge --auto", verbose=True, dry_run=dry_run, ) else: - assert not self.changelog_pr + if not dry_run: + assert not self.changelog_pr + res = True - self.prs_merged = res_1 and res_2 + if self.release_type == "new": + assert self.version_bump_pr + print("Merging Version Bump PR") + if dry_run: + version_bump_pr = 23456 + else: + version_bump_pr = int(self.version_bump_pr.split("/")[-1]) + res = res and Shell.check( + f"gh pr merge {version_bump_pr} --repo {repo} --merge --auto", + verbose=True, + dry_run=dry_run, + ) + else: + if not dry_run: + assert not self.version_bump_pr + + self.prs_merged = res class RepoTypes: @@ -759,7 +807,7 @@ if __name__ == "__main__": release_info.prepare( commit_ref=args.ref, release_type=args.release_type, - 
skip_tag_check=args.skip_tag_check, + _skip_tag_check=args.skip_tag_check, ) if args.download_packages: diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index 786a529e0a9..c8dbcd10245 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -93,7 +93,7 @@ def process_single_image( results = [] # type: TestResults for ver in versions: stopwatch = Stopwatch() - for i in range(5): + for i in range(2): success, build_log = build_and_push_one_image( image, ver, additional_cache, push, from_tag ) diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 3251ec5644e..34439c19f0a 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -70,7 +70,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--tag-type", type=str, - choices=("head", "release", "latest-release"), + choices=("head", "release", "release-latest"), default="head", help="defines required tags for resulting docker image. " "head - for master image (tag: head) " diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index ed727dd3659..55eefcf9714 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -31,15 +31,14 @@ def get_fasttest_cmd( "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls "--network=host " # required to get access to IAM credentials f"-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output " - f"-e FASTTEST_SOURCE=/ClickHouse " + f"-e FASTTEST_SOURCE=/repo " f"-e FASTTEST_CMAKE_FLAGS='-DCOMPILER_CACHE=sccache' " f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} " f"-e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1 " f"-e SCCACHE_BUCKET={S3_BUILDS_BUCKET} -e SCCACHE_S3_KEY_PREFIX=ccache/sccache " "-e stage=clone_submodules " - f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse " - f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt " - f"--volume={output_path}:/test_output {image}" + f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/repo " + f"--volume={output_path}:/test_output {image} /repo/tests/docker_scripts/fasttest_runner.sh" ) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index b7391eff01b..ce2ead59d1a 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -5,10 +5,11 @@ import csv import logging import os import re +import signal import subprocess import sys from pathlib import Path -from typing import List, Tuple +from typing import List, Tuple, Optional from build_download_helper import download_all_deb_packages from clickhouse_helper import CiLogsCredentials @@ -25,11 +26,12 @@ from report import ( TestResults, read_test_results, FAILURE, + TestResult, ) from stopwatch import Stopwatch from tee_popen import TeePopen from ci_config import CI -from ci_utils import Utils +from ci_utils import Utils, Shell NO_CHANGES_MSG = "Nothing to run" @@ -113,32 +115,28 @@ def get_run_command( if flaky_check: envs.append("-e NUM_TRIES=50") - envs.append("-e MAX_RUN_TIME=2800") - else: - max_run_time = os.getenv("MAX_RUN_TIME", "0") - envs.append(f"-e MAX_RUN_TIME={max_run_time}") envs += [f"-e {e}" for e in additional_envs] env_str = " ".join(envs) - volume_with_broken_test = ( - f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt " - if "analyzer" not in check_name - else "" - ) + + if "stateful" in check_name.lower(): + run_script = 
"/repo/tests/docker_scripts/stateful_runner.sh" + elif "stateless" in check_name.lower(): + run_script = "/repo/tests/docker_scripts/stateless_runner.sh" + else: + assert False return ( - f"docker run --volume={builds_path}:/package_folder " + f"docker run --rm --name func-tester --volume={builds_path}:/package_folder " # For dmesg and sysctl "--privileged " - f"{ci_logs_args}" - f"--volume={repo_path}/tests:/usr/share/clickhouse-test " - f"--volume={repo_path}/utils/grpc-client:/usr/share/clickhouse-utils/grpc-client " - f"{volume_with_broken_test}" + f"{ci_logs_args} " + f"--volume={repo_path}:/repo " f"--volume={result_path}:/test_output " f"--volume={server_log_path}:/var/log/clickhouse-server " "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls - f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image}" + f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} {run_script}" ) @@ -198,7 +196,7 @@ def process_results( state, description = status[0][0], status[0][1] if ret_code != 0: state = ERROR - description += " (but script exited with an error)" + description = f"Job failed, exit code: {ret_code}. " + description try: results_path = result_directory / "test_results.tsv" @@ -240,7 +238,19 @@ def parse_args(): return parser.parse_args() +test_process = None # type: Optional[TeePopen] +timeout_expired = False + + +def handle_sigterm(signum, _frame): + print(f"WARNING: Received signal {signum}") + global timeout_expired + timeout_expired = True + Shell.check(f"docker exec func-tester pkill -f clickhouse-test", verbose=True) + + def main(): + signal.signal(signal.SIGTERM, handle_sigterm) logging.basicConfig(level=logging.INFO) for handler in logging.root.handlers: # pylint: disable=protected-access @@ -328,11 +338,13 @@ def main(): logging.info("Going to run func tests: %s", run_command) with TeePopen(run_command, run_log_path) as process: + global test_process + test_process = process retcode = process.wait() if retcode == 0: logging.info("Run successfully") else: - logging.info("Run failed") + logging.info("Run failed, exit code %s", retcode) try: subprocess.check_call( @@ -348,6 +360,13 @@ def main(): state, description, test_results, additional_logs = process_results( retcode, result_path, server_log_path ) + if timeout_expired: + description = "Timeout expired" + state = FAILURE + test_results.insert( + 0, TestResult.create_check_timeout_expired(stopwatch.duration_seconds) + ) + else: print( "This is validate bugfix or flaky check run, but no changes test to run - skip with success" diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index f5dbef4f6db..c3b71b85022 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -9,6 +9,7 @@ import random import re import shlex import shutil +import signal import string import subprocess import sys @@ -16,11 +17,13 @@ import time import zlib # for crc32 from collections import defaultdict from itertools import chain -from typing import Any, Dict +from typing import Any, Dict, Optional from env_helper import IS_CI from integration_test_images import IMAGES from tee_popen import TeePopen +from report import JOB_TIMEOUT_TEST_NAME +from stopwatch import Stopwatch MAX_RETRY = 1 NUM_WORKERS = 5 @@ -621,6 +624,9 @@ class ClickhouseIntegrationTestsRunner: test_data_dirs = {} for i in range(num_tries): + if timeout_expired: + print("Timeout expired - break test group execution") + break logging.info("Running test group %s for the %s 
retry", test_group, i) clear_ip_tables_and_restart_daemons() @@ -657,6 +663,8 @@ class ClickhouseIntegrationTestsRunner: logging.info("Executing cmd: %s", cmd) # ignore retcode, since it meaningful due to pipe to tee with subprocess.Popen(cmd, shell=True, stderr=log, stdout=log) as proc: + global runner_subprocess + runner_subprocess = proc proc.wait() extra_logs_names = [log_basename] @@ -780,6 +788,9 @@ class ClickhouseIntegrationTestsRunner: logs = [] tries_num = 1 if should_fail else FLAKY_TRIES_COUNT for i in range(tries_num): + if timeout_expired: + print("Timeout expired - break flaky check execution") + break final_retry += 1 logging.info("Running tests for the %s time", i) counters, tests_times, log_paths = self.try_run_test_group( @@ -839,6 +850,7 @@ class ClickhouseIntegrationTestsRunner: return result_state, status_text, test_result, logs def run_impl(self, repo_path, build_path): + stopwatch = Stopwatch() if self.flaky_check or self.bugfix_validate_check: return self.run_flaky_check( repo_path, build_path, should_fail=self.bugfix_validate_check @@ -921,6 +933,9 @@ class ClickhouseIntegrationTestsRunner: random.shuffle(items_to_run) for group, tests in items_to_run: + if timeout_expired: + print("Timeout expired - break tests execution") + break logging.info("Running test group %s containing %s tests", group, len(tests)) group_counters, group_test_times, log_paths = self.try_run_test_group( repo_path, group, tests, MAX_RETRY, NUM_WORKERS, 0 @@ -981,6 +996,17 @@ class ClickhouseIntegrationTestsRunner: status_text = "Timeout, " + status_text result_state = "failure" + if timeout_expired: + logging.error( + "Job killed by external timeout signal - setting status to failure!" + ) + status_text = "Job timeout expired, " + status_text + result_state = "failure" + # add mock test case to make timeout visible in job report and in ci db + test_result.insert( + 0, (JOB_TIMEOUT_TEST_NAME, "FAIL", f"{stopwatch.duration_seconds}", "") + ) + if not counters or sum(len(counter) for counter in counters.values()) == 0: status_text = "No tests found for some reason! 
It's a bug" result_state = "failure" @@ -1001,6 +1027,7 @@ def write_results(results_file, status_file, results, status): def run(): + signal.signal(signal.SIGTERM, handle_sigterm) logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s") repo_path = os.environ.get("CLICKHOUSE_TESTS_REPO_PATH") @@ -1035,5 +1062,17 @@ def run(): logging.info("Result written") +timeout_expired = False +runner_subprocess = None # type:Optional[subprocess.Popen] + + +def handle_sigterm(signum, _frame): + print(f"WARNING: Received signal {signum}") + global timeout_expired + timeout_expired = True + if runner_subprocess: + runner_subprocess.send_signal(signal.SIGTERM) + + if __name__ == "__main__": run() diff --git a/tests/ci/report.py b/tests/ci/report.py index 6779a6dae96..a1b25b994c7 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -249,6 +249,7 @@ JOB_REPORT_FILE = Path(GITHUB_WORKSPACE) / "job_report.json" JOB_STARTED_TEST_NAME = "STARTED" JOB_FINISHED_TEST_NAME = "COMPLETED" +JOB_TIMEOUT_TEST_NAME = "Job Timeout Expired" @dataclass @@ -277,8 +278,8 @@ class TestResult: self.log_files.append(log_path) @staticmethod - def create_check_timeout_expired(timeout: float) -> "TestResult": - return TestResult("Check timeout expired", "FAIL", timeout) + def create_check_timeout_expired(duration: Optional[float] = None) -> "TestResult": + return TestResult(JOB_TIMEOUT_TEST_NAME, "FAIL", time=duration) TestResults = List[TestResult] @@ -303,7 +304,7 @@ class JobReport: # indicates that this is not real job report but report for the job that was skipped by rerun check job_skipped: bool = False # indicates that report generated by CI script in order to check later if job was killed before real report is generated - pre_report: bool = False + dummy: bool = False exit_code: int = -1 @staticmethod @@ -311,7 +312,7 @@ class JobReport: return datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") @classmethod - def create_pre_report(cls, status: str, job_skipped: bool) -> "JobReport": + def create_dummy(cls, status: str, job_skipped: bool) -> "JobReport": return JobReport( status=status, description="", @@ -320,7 +321,7 @@ class JobReport: duration=0.0, additional_files=[], job_skipped=job_skipped, - pre_report=True, + dummy=True, ) def update_duration(self): @@ -741,10 +742,21 @@ def create_test_html_report( has_test_time = any(tr.time is not None for tr in test_results) has_log_urls = False - # Display entires with logs at the top (they correspond to failed tests) - test_results.sort( - key=lambda result: result.raw_logs is None and result.log_files is None - ) + def sort_key(status): + if "fail" in status.lower(): + return 0 + elif "error" in status.lower(): + return 1 + elif "not" in status.lower(): + return 2 + elif "ok" in status.lower(): + return 10 + elif "success" in status.lower(): + return 9 + else: + return 5 + + test_results.sort(key=lambda result: sort_key(result.status)) for test_result in test_results: colspan = 0 diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index 85da601e379..f9656e60448 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -57,10 +57,16 @@ def get_run_command( additional_envs: List[str], ci_logs_args: str, image: DockerImage, + upgrade_check: bool, ) -> str: envs = [f"-e {e}" for e in additional_envs] env_str = " ".join(envs) + if upgrade_check: + run_script = "/repo/tests/docker_scripts/upgrade_runner.sh" + else: + run_script = "/repo/tests/docker_scripts/stress_runner.sh" + cmd = ( "docker run --cap-add=SYS_PTRACE " # For 
dmesg and sysctl @@ -70,8 +76,8 @@ def get_run_command( f"{ci_logs_args}" f"--volume={build_path}:/package_folder " f"--volume={result_path}:/test_output " - f"--volume={repo_tests_path}:/usr/share/clickhouse-test " - f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} " + f"--volume={repo_tests_path}/..:/repo " + f"--volume={server_log_path}:/var/log/clickhouse-server {env_str} {image} {run_script}" ) return cmd @@ -128,7 +134,7 @@ def process_results( return state, description, test_results, additional_files -def run_stress_test(docker_image_name: str) -> None: +def run_stress_test(upgrade_check: bool = False) -> None: logging.basicConfig(level=logging.INFO) for handler in logging.root.handlers: # pylint: disable=protected-access @@ -148,7 +154,7 @@ def run_stress_test(docker_image_name: str) -> None: pr_info = PRInfo() - docker_image = pull_image(get_docker_image(docker_image_name)) + docker_image = pull_image(get_docker_image("clickhouse/stress-test")) packages_path = temp_path / "packages" packages_path.mkdir(parents=True, exist_ok=True) @@ -177,6 +183,7 @@ def run_stress_test(docker_image_name: str) -> None: additional_envs, ci_logs_args, docker_image, + upgrade_check, ) logging.info("Going to run stress test: %s", run_command) @@ -208,4 +215,4 @@ def run_stress_test(docker_image_name: str) -> None: if __name__ == "__main__": - run_stress_test("clickhouse/stress-test") + run_stress_test() diff --git a/tests/ci/tee_popen.py b/tests/ci/tee_popen.py index 13db50df53f..53b0a0f6c2c 100644 --- a/tests/ci/tee_popen.py +++ b/tests/ci/tee_popen.py @@ -2,6 +2,7 @@ import logging import os +import signal import sys from io import TextIOWrapper from pathlib import Path @@ -30,20 +31,34 @@ class TeePopen: self._process = None # type: Optional[Popen] self.timeout = timeout self.timeout_exceeded = False + self.terminated_by_sigterm = False + self.terminated_by_sigkill = False def _check_timeout(self) -> None: if self.timeout is None: return sleep(self.timeout) + logging.warning( + "Timeout exceeded. Send SIGTERM to process %s, timeout %s", + self.process.pid, + self.timeout, + ) + self.send_signal(signal.SIGTERM) + time_wait = 0 + self.terminated_by_sigterm = True self.timeout_exceeded = True + while self.process.poll() is None and time_wait < 100: + print("wait...") + wait = 5 + sleep(wait) + time_wait += wait while self.process.poll() is None: - logging.warning( - "Killing process %s, timeout %s exceeded", - self.process.pid, - self.timeout, + logging.error( + "Process is still running. 
Send SIGKILL", ) - os.killpg(self.process.pid, 9) - sleep(10) + self.send_signal(signal.SIGKILL) + self.terminated_by_sigkill = True + sleep(5) def __enter__(self) -> "TeePopen": self.process = Popen( @@ -57,6 +72,8 @@ class TeePopen: bufsize=1, errors="backslashreplace", ) + sleep(1) + print(f"Subprocess started, pid [{self.process.pid}]") if self.timeout is not None and self.timeout > 0: t = Thread(target=self._check_timeout) t.daemon = True # does not block the program from exit @@ -85,6 +102,12 @@ class TeePopen: return self.process.wait() + def poll(self): + return self.process.poll() + + def send_signal(self, signal_num): + os.killpg(self.process.pid, signal_num) + @property def process(self) -> Popen: if self._process is not None: diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 525b3bf367b..c3e55aeac06 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -35,10 +35,16 @@ class TestCIConfig(unittest.TestCase): f"Job [{job}] must have style-checker(-aarch64) runner", ) elif "binary_" in job.lower() or "package_" in job.lower(): - self.assertTrue( - CI.JOB_CONFIGS[job].runner_type == CI.Runners.BUILDER, - f"Job [{job}] must have [{CI.Runners.BUILDER}] runner", - ) + if job.lower() == CI.BuildNames.PACKAGE_AARCH64: + self.assertTrue( + CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_ARM,), + f"Job [{job}] must have [{CI.Runners.BUILDER_ARM}] runner", + ) + else: + self.assertTrue( + CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER,), + f"Job [{job}] must have [{CI.Runners.BUILDER}] runner", + ) elif "aarch64" in job.lower(): self.assertTrue( "aarch" in CI.JOB_CONFIGS[job].runner_type, diff --git a/tests/ci/upgrade_check.py b/tests/ci/upgrade_check.py index 83b6f9e299f..8662611dffe 100644 --- a/tests/ci/upgrade_check.py +++ b/tests/ci/upgrade_check.py @@ -1,4 +1,4 @@ import stress_check if __name__ == "__main__": - stress_check.run_stress_test("clickhouse/upgrade-check") + stress_check.run_stress_test(upgrade_check=True) diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 07a7a9601c0..b20b2bb25cf 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -85,6 +85,16 @@ class ClickHouseVersion: self._tweak = 1 return self + def bump_patch(self) -> "ClickHouseVersion": + self._revision += 1 + self._patch += 1 + self._tweak = 1 + return self + + def reset_tweak(self) -> "ClickHouseVersion": + self._tweak = 1 + return self + def major_update(self) -> "ClickHouseVersion": if self._git is not None: self._git.update() @@ -104,13 +114,6 @@ class ClickHouseVersion: self.major, self.minor, self.patch + 1, self.revision, self._git ) - def reset_tweak(self) -> "ClickHouseVersion": - if self._git is not None: - self._git.update() - return ClickHouseVersion( - self.major, self.minor, self.patch, self.revision, self._git, 1 - ) - @property def major(self) -> int: return self._major diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a3d7e0e922d..4f9380d6f20 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -39,6 +39,7 @@ from errno import ESRCH from subprocess import PIPE, Popen from time import sleep, time from typing import Dict, List, Optional, Set, Tuple, Union +from ast import literal_eval as make_tuple try: import termcolor # type: ignore @@ -59,6 +60,7 @@ MESSAGES_TO_RETRY = [ "is already started to be removing by another replica right now", # This is from LSan, and it indicates its own internal problem: "Unable to get registers from thread", + "You can retry", ] 
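Note: taken together, the TeePopen and signal-handler changes above replace the in-container `timeout` wrappers with job-level signal handling: on timeout TeePopen sends SIGTERM to the child's process group, waits for a graceful shutdown, and only then escalates to SIGKILL, while the check scripts trap SIGTERM and mark the run as timed out. A minimal sketch of that escalation pattern, simplified and not the exact CI code:

```python
import os
import signal
import subprocess
import time

# Start the child in its own process group so the whole tree can be signalled.
proc = subprocess.Popen(["sleep", "600"], start_new_session=True)

def stop_with_escalation(p: subprocess.Popen, grace_seconds: int = 100) -> int:
    os.killpg(p.pid, signal.SIGTERM)           # ask the group to stop gracefully
    deadline = time.monotonic() + grace_seconds
    while p.poll() is None and time.monotonic() < deadline:
        time.sleep(5)                          # give it time to flush logs and reports
    if p.poll() is None:
        os.killpg(p.pid, signal.SIGKILL)       # last resort, cannot be caught
    return p.wait()

print("exit code:", stop_with_escalation(proc))
```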
MAX_RETRIES = 3 @@ -1144,9 +1146,24 @@ class TestCase: return description + "\n" + def apply_random_settings_limits(self, random_settings): + for setting in random_settings: + if setting in self.random_settings_limits: + min_value = self.random_settings_limits[setting][0] + if min_value and random_settings[setting] < min_value: + random_settings[setting] = min_value + max_value = self.random_settings_limits[setting][1] + if max_value and random_settings[setting] > max_value: + random_settings[setting] = max_value + def __init__(self, suite, case: str, args, is_concurrent: bool): self.case: str = case # case file name self.tags: Set[str] = suite.all_tags[case] if case in suite.all_tags else set() + self.random_settings_limits = ( + suite.all_random_settings_limits[case] + if case in suite.all_random_settings_limits + else {} + ) for tag in os.getenv("GLOBAL_TAGS", "").split(","): self.tags.add(tag.strip()) @@ -1188,11 +1205,13 @@ class TestCase: if self.randomize_settings: self.random_settings = SettingsRandomizer.get_random_settings(args) + self.apply_random_settings_limits(self.random_settings) if self.randomize_merge_tree_settings: self.merge_tree_random_settings = ( MergeTreeSettingsRandomizer.get_random_settings(args) ) + self.apply_random_settings_limits(self.merge_tree_random_settings) self.base_url_params = ( os.environ["CLICKHOUSE_URL_PARAMS"] @@ -1962,7 +1981,9 @@ class TestSuite: return test_name @staticmethod - def read_test_tags(suite_dir: str, all_tests: List[str]) -> Dict[str, Set[str]]: + def read_test_tags_and_random_settings_limits( + suite_dir: str, all_tests: List[str] + ) -> (Dict[str, Set[str]], Dict[str, Dict[str, Tuple[int, int]]]): def get_comment_sign(filename): if filename.endswith(".sql") or filename.endswith(".sql.j2"): return "--" @@ -1987,27 +2008,58 @@ class TestSuite: tags = {tag.strip() for tag in tags} return tags - def is_shebang(line: str) -> bool: - return line.startswith("#!") + def parse_random_settings_limits_from_line( + line, comment_sign + ) -> Dict[str, Tuple[int, int]]: + if not line.startswith(comment_sign): + return {} + random_settings_limits_str = line[len(comment_sign) :].lstrip() + random_settings_limits_prefix = "Random settings limits:" + if not random_settings_limits_str.startswith(random_settings_limits_prefix): + return {} + random_settings_limits_str = random_settings_limits_str[ + len(random_settings_limits_prefix) : + ] + # limits are specified in a form 'setting1=(min, max); setting2=(min,max); ...' 
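Note: as the comment above says, limits are written in a test header such as `-- Random settings limits: max_threads=(1, 5); max_block_size=(1000, 65536)` (the setting names here are examples only). A standalone sketch of the parsing step, which relies on ast.literal_eval exactly like the code below:

```python
from ast import literal_eval as make_tuple

# Hypothetical header line from a .sql test; the setting names are examples only.
line = "-- Random settings limits: max_threads=(1, 5); max_block_size=(1000, 65536)"
comment_sign = "--"
prefix = "Random settings limits:"

limits = {}
body = line[len(comment_sign):].lstrip()
if body.startswith(prefix):
    for item in body[len(prefix):].split(";"):
        name, value = item.split("=")
        limits[name.strip()] = make_tuple(value)

print(limits)  # {'max_threads': (1, 5), 'max_block_size': (1000, 65536)}
```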
+ random_settings_limits = {} + for setting_and_limit in random_settings_limits_str.split(";"): + setting_and_limit = setting_and_limit.split("=") + random_settings_limits[setting_and_limit[0].strip()] = make_tuple( + setting_and_limit[1] + ) + return random_settings_limits - def find_tag_line(file): - for line in file: - line = line.strip() - if line and not is_shebang(line): + def find_tag_line(lines, comment_sign): + for line in lines: + if line.startswith(comment_sign) and line[ + len(comment_sign) : + ].lstrip().startswith("Tags:"): return line return "" - def load_tags_from_file(filepath): + def find_random_settings_limits_line(lines, comment_sign): + for line in lines: + if line.startswith(comment_sign) and line[ + len(comment_sign) : + ].lstrip().startswith("Random settings limits:"): + return line + return "" + + def load_tags_and_random_settings_limits_from_file(filepath): comment_sign = get_comment_sign(filepath) need_query_params = False with open(filepath, "r", encoding="utf-8") as file: try: - tag_line = find_tag_line(file) + lines = file.readlines() + tag_line = find_tag_line(lines, comment_sign) + random_settings_limits_line = find_random_settings_limits_line( + lines, comment_sign + ) except UnicodeDecodeError: - return [] + return [], {} try: if filepath.endswith(".sql"): - for line in file: + for line in lines: if "{CLICKHOUSE_DATABASE" in line: need_query_params = True except UnicodeDecodeError: @@ -2015,18 +2067,31 @@ class TestSuite: parsed_tags = parse_tags_from_line(tag_line, comment_sign) if need_query_params: parsed_tags.add("need-query-parameters") - return parsed_tags + random_settings_limits = parse_random_settings_limits_from_line( + random_settings_limits_line, comment_sign + ) + return parsed_tags, random_settings_limits all_tags = {} + all_random_settings_limits = {} start_time = datetime.now() for test_name in all_tests: - tags = load_tags_from_file(os.path.join(suite_dir, test_name)) + ( + tags, + random_settings_limits, + ) = load_tags_and_random_settings_limits_from_file( + os.path.join(suite_dir, test_name) + ) # noqa: ignore E203 if tags: all_tags[test_name] = tags + if random_settings_limits: + all_random_settings_limits[test_name] = random_settings_limits elapsed = (datetime.now() - start_time).total_seconds() if elapsed > 1: - print(f"Tags for suite {suite_dir} read in {elapsed:.2f} seconds") - return all_tags + print( + f"Tags and random settings limits for suite {suite_dir} read in {elapsed:.2f} seconds" + ) + return all_tags, all_random_settings_limits def __init__(self, args, suite_path: str, suite_tmp_path: str, suite: str): self.args = args @@ -2056,10 +2121,16 @@ class TestSuite: self.all_tests: List[str] = self.get_tests_list( self.tests_in_suite_key_func, filter_func ) - self.all_tags: Dict[str, Set[str]] = self.read_test_tags( - self.suite_path, self.all_tests - ) + all_tags_and_random_settings_limits = ( + self.read_test_tags_and_random_settings_limits( + self.suite_path, self.all_tests + ) + ) + self.all_tags: Dict[str, Set[str]] = all_tags_and_random_settings_limits[0] + self.all_random_settings_limits: Dict[str, Dict[str, (int, int)]] = ( + all_tags_and_random_settings_limits[1] + ) self.sequential_tests = [] self.parallel_tests = [] for test_name in self.all_tests: @@ -2571,12 +2642,12 @@ def do_run_tests(jobs, test_suite: TestSuite): try: clickhouse_execute( args, - query="SELECT 1 /*hang up check*/", - max_http_retries=5, - timeout=20, + query="SELECT 1 /*hung check*/", + max_http_retries=20, + timeout=10, ) except Exception: - 
print("Hang up check failed") + print("Hung check failed") server_died.set() if server_died.is_set(): diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index e106e3a0e6b..091071f0637 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -27,6 +27,7 @@ 0.3 0.15 0.15 + 0 cache @@ -37,6 +38,7 @@ 100 0 0 + 0 diff --git a/tests/config/config.d/storage_conf_02944.xml b/tests/config/config.d/storage_conf_02944.xml index 5f45640a923..08d78900229 100644 --- a/tests/config/config.d/storage_conf_02944.xml +++ b/tests/config/config.d/storage_conf_02944.xml @@ -19,6 +19,7 @@ 10 100 0 + 0 diff --git a/tests/config/install.sh b/tests/config/install.sh index 7c4b36dc4bd..fda74bd7a8d 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -94,6 +94,7 @@ ln -sf $SRC_PATH/users.d/prefetch_settings.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/nonconst_timezone.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/allow_introspection_functions.yaml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/users.d/replicated_ddl_entry.xml $DEST_SERVER_PATH/users.d/ +ln -sf $SRC_PATH/users.d/limits.yaml $DEST_SERVER_PATH/users.d/ if [[ -n "$USE_OLD_ANALYZER" ]] && [[ "$USE_OLD_ANALYZER" -eq 1 ]]; then ln -sf $SRC_PATH/users.d/analyzer.xml $DEST_SERVER_PATH/users.d/ diff --git a/tests/config/users.d/limits.yaml b/tests/config/users.d/limits.yaml new file mode 100644 index 00000000000..53cbbfa744a --- /dev/null +++ b/tests/config/users.d/limits.yaml @@ -0,0 +1,56 @@ +profiles: + default: + max_memory_usage: 5G + max_rows_to_read: 20000000 + + # Also set every other limit to a high value, so it will not limit anything, but we will test that code around it. + s3_max_get_rps: 1000000 + s3_max_get_burst: 2000000 + s3_max_put_rps: 1000000 + s3_max_put_burst: 2000000 + max_remote_read_network_bandwidth: 1T + max_remote_write_network_bandwidth: 1T + max_local_read_bandwidth: 1T + max_local_write_bandwidth: 1T + use_index_for_in_with_subqueries_max_values: 1G + max_bytes_to_read: 1T + max_bytes_to_read_leaf: 1T + max_rows_to_group_by: 10G + max_bytes_before_external_group_by: 10G + max_rows_to_sort: 10G + max_bytes_to_sort: 10G + max_bytes_before_external_sort: 10G + max_result_rows: 1G + max_result_bytes: 1G + max_execution_time: 600 + max_execution_time_leaf: 600 + max_execution_speed: 100G + max_execution_speed_bytes: 10T + timeout_before_checking_execution_speed: 300 + max_estimated_execution_time: 600 + max_columns_to_read: 20K + max_temporary_columns: 20K + max_temporary_non_const_columns: 20K + max_rows_in_set: 10G + max_bytes_in_set: 10G + max_rows_in_join: 10G + max_bytes_in_join: 10G + max_rows_to_transfer: 1G + max_bytes_to_transfer: 1G + max_rows_in_distinct: 10G + max_bytes_in_distinct: 10G + max_memory_usage_for_user: 32G + max_network_bandwidth: 100G + max_network_bytes: 1T + max_network_bandwidth_for_user: 100G + max_network_bandwidth_for_all_users: 100G + max_temporary_data_on_disk_size_for_user: 100G + max_temporary_data_on_disk_size_for_query: 100G + max_backup_bandwidth: 100G + max_hyperscan_regexp_length: 1M + max_hyperscan_regexp_total_length: 10M + query_cache_max_size_in_bytes: 10M + query_cache_max_entries: 100K + external_storage_max_read_rows: 10G + external_storage_max_read_bytes: 10G + max_streams_for_merge_tree_reading: 1000 diff --git a/docker/test/stateless/attach_gdb.lib b/tests/docker_scripts/attach_gdb.lib similarity index 98% rename from docker/test/stateless/attach_gdb.lib rename to 
tests/docker_scripts/attach_gdb.lib index 2f1375a2f0f..4170a19176c 100644 --- a/docker/test/stateless/attach_gdb.lib +++ b/tests/docker_scripts/attach_gdb.lib @@ -1,7 +1,7 @@ #!/bin/bash # shellcheck source=./utils.lib -source /utils.lib +source /repo/tests/docker_scripts/utils.lib function attach_gdb_to_clickhouse() { diff --git a/docker/test/stateful/create.sql b/tests/docker_scripts/create.sql similarity index 100% rename from docker/test/stateful/create.sql rename to tests/docker_scripts/create.sql diff --git a/docker/test/fasttest/run.sh b/tests/docker_scripts/fasttest_runner.sh similarity index 95% rename from docker/test/fasttest/run.sh rename to tests/docker_scripts/fasttest_runner.sh index 394d31addb1..1eaba2c7cdf 100755 --- a/docker/test/fasttest/run.sh +++ b/tests/docker_scripts/fasttest_runner.sh @@ -256,22 +256,6 @@ function configure rm -f "$FASTTEST_DATA/config.d/secure_ports.xml" } -function timeout_with_logging() { - local exit_code=0 - - timeout -s TERM --preserve-status "${@}" || exit_code="${?}" - - echo "Checking if it is a timeout. The code 124 will indicate a timeout." - if [[ "${exit_code}" -eq "124" ]] - then - echo "The command 'timeout ${*}' has been killed by timeout." - else - echo "No, it isn't a timeout." - fi - - return $exit_code -} - function run_tests { clickhouse-server --version @@ -340,8 +324,8 @@ case "$stage" in configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt" ;& "run_tests") - timeout_with_logging 35m bash -c run_tests ||: - /process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \ + run_tests ||: + /repo/tests/docker_scripts/process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \ --out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \ --out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv" ;; diff --git a/docker/test/util/process_functional_tests_result.py b/tests/docker_scripts/process_functional_tests_result.py similarity index 92% rename from docker/test/util/process_functional_tests_result.py rename to tests/docker_scripts/process_functional_tests_result.py index aa2ea686c46..1dc3090484c 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/tests/docker_scripts/process_functional_tests_result.py @@ -32,7 +32,7 @@ def process_test_log(log_path, broken_tests): success_finish = False test_results = [] test_end = True - with open(log_path, "r") as test_file: + with open(log_path, "r", encoding="utf-8") as test_file: for line in test_file: original_line = line line = line.strip() @@ -116,7 +116,7 @@ def process_test_log(log_path, broken_tests): test[0], test[1], test[2], - "".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"), + "".join(test[3])[:8192].replace("\t", "\\t").replace("\n", "\\n"), ] for test in test_results ] @@ -150,7 +150,7 @@ def process_result(result_path, broken_tests): if result_path and os.path.exists(result_path): ( - total, + _total, skipped, unknown, failed, @@ -191,11 +191,11 @@ def process_result(result_path, broken_tests): else: description = "" - description += "fail: {}, passed: {}".format(failed, success) + description += f"fail: {failed}, passed: {success}" if skipped != 0: - description += ", skipped: {}".format(skipped) + description += f", skipped: {skipped}" if unknown != 0: - description += ", unknown: {}".format(unknown) + description += f", unknown: {unknown}" else: state = "failure" description = "Output log doesn't exist" @@ -205,10 
+205,10 @@ def process_result(result_path, broken_tests): def write_results(results_file, status_file, results, status): - with open(results_file, "w") as f: + with open(results_file, "w", encoding="utf-8") as f: out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, "w") as f: + with open(status_file, "w", encoding="utf-8") as f: out = csv.writer(f, delimiter="\t") out.writerow(status) @@ -221,15 +221,15 @@ if __name__ == "__main__": parser.add_argument("--in-results-dir", default="/test_output/") parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") - parser.add_argument("--broken-tests", default="/analyzer_tech_debt.txt") + parser.add_argument("--broken-tests", default="/repo/tests/analyzer_tech_debt.txt") args = parser.parse_args() - broken_tests = list() + broken_tests = [] if os.path.exists(args.broken_tests): - logging.info(f"File {args.broken_tests} with broken tests found") - with open(args.broken_tests) as f: + print(f"File {args.broken_tests} with broken tests found") + with open(args.broken_tests, encoding="utf-8") as f: broken_tests = f.read().splitlines() - logging.info(f"Broken tests in the list: {len(broken_tests)}") + print(f"Broken tests in the list: {len(broken_tests)}") state, description, test_results = process_result(args.in_results_dir, broken_tests) logging.info("Result parsed") diff --git a/docker/test/stateless/setup_hdfs_minicluster.sh b/tests/docker_scripts/setup_hdfs_minicluster.sh similarity index 95% rename from docker/test/stateless/setup_hdfs_minicluster.sh rename to tests/docker_scripts/setup_hdfs_minicluster.sh index 15a54f59096..622270ba5d5 100755 --- a/docker/test/stateless/setup_hdfs_minicluster.sh +++ b/tests/docker_scripts/setup_hdfs_minicluster.sh @@ -5,7 +5,7 @@ set -e -x -a -u ls -lha -cd hadoop-3.3.1 +cd /hadoop-3.3.1 export JAVA_HOME=/usr mkdir -p target/test/data diff --git a/docker/test/stateless/setup_minio.sh b/tests/docker_scripts/setup_minio.sh similarity index 98% rename from docker/test/stateless/setup_minio.sh rename to tests/docker_scripts/setup_minio.sh index c1508df7e82..40e93e713a1 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -143,7 +143,7 @@ main() { fi start_minio setup_minio "$1" - upload_data "${query_dir}" "${2:-/usr/share/clickhouse-test}" + upload_data "${query_dir}" "${2:-/repo/tests/}" setup_aws_credentials } diff --git a/docker/test/stateful/run.sh b/tests/docker_scripts/stateful_runner.sh similarity index 92% rename from docker/test/stateful/run.sh rename to tests/docker_scripts/stateful_runner.sh index 3a4f0d97993..86f6a299ad3 100755 --- a/docker/test/stateful/run.sh +++ b/tests/docker_scripts/stateful_runner.sh @@ -4,9 +4,6 @@ source /setup_export_logs.sh set -e -x -MAX_RUN_TIME=${MAX_RUN_TIME:-3600} -MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 
3600 : MAX_RUN_TIME)) - # Choose random timezone for this test run TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)" echo "Choosen random timezone $TZ" @@ -17,17 +14,17 @@ dpkg -i package_folder/clickhouse-common-static-dbg_*.deb dpkg -i package_folder/clickhouse-server_*.deb dpkg -i package_folder/clickhouse-client_*.deb -ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test # shellcheck disable=SC1091 -source /utils.lib +source /repo/tests/docker_scripts/utils.lib # install test configs -/usr/share/clickhouse-test/config/install.sh +/repo/tests/config/install.sh azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence & -./setup_minio.sh stateful +/repo/tests/docker_scripts/setup_minio.sh stateful ./mc admin trace clickminio > /test_output/minio.log & MC_ADMIN_PID=$! @@ -108,7 +105,7 @@ setup_logs_replication clickhouse-client --query "SHOW DATABASES" clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client --multiquery < create.sql +clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW TABLES FROM datasets" if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then @@ -118,14 +115,11 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] clickhouse-client --query "CREATE TABLE test.hits AS datasets.hits_v1" clickhouse-client --query "CREATE TABLE test.visits AS datasets.visits_v1" - clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1" - clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1" clickhouse-client --query "DROP TABLE datasets.hits_v1" clickhouse-client --query "DROP TABLE datasets.visits_v1" - - MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours) - MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? 
MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited) else clickhouse-client --query "CREATE DATABASE test" clickhouse-client --query "SHOW TABLES FROM test" @@ -191,8 +185,8 @@ else ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" - clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + clickhouse-client --max_memory_usage 10G --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC" clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC" else @@ -200,7 +194,8 @@ else clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" fi clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID 
String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'" - clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" + # AWS S3 is very inefficient, so increase memory even further: + clickhouse-client --max_memory_usage 30G --max_memory_usage_for_user 30G --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16" fi clickhouse-client --query "SHOW TABLES FROM test" @@ -242,6 +237,7 @@ function run_tests() --hung-check --print-time --capture-client-stacktrace + --queries "/repo/tests/queries" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" ) @@ -257,31 +253,14 @@ function run_tests() export -f run_tests -function timeout_with_logging() { - local exit_code=0 - - timeout -s TERM --preserve-status "${@}" || exit_code="${?}" - - echo "Checking if it is a timeout. The code 124 will indicate a timeout." - if [[ "${exit_code}" -eq "124" ]] - then - echo "The command 'timeout ${*}' has been killed by timeout." - else - echo "No, it isn't a timeout." - fi - - return $exit_code -} - -TIMEOUT=$((MAX_RUN_TIME - 700)) -timeout_with_logging "$TIMEOUT" bash -c run_tests ||: +run_tests ||: echo "Files in current directory" ls -la ./ echo "Files in root directory" ls -la / -/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv +/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv sudo clickhouse stop ||: if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then diff --git a/docker/test/stateless/run.sh b/tests/docker_scripts/stateless_runner.sh similarity index 93% rename from docker/test/stateless/run.sh rename to tests/docker_scripts/stateless_runner.sh index c70cbe1fe45..671b1f5ca71 100755 --- a/docker/test/stateless/run.sh +++ b/tests/docker_scripts/stateless_runner.sh @@ -1,10 +1,13 @@ #!/bin/bash +# fail on errors, verbose and export all env variables +set -e -x -a + # shellcheck disable=SC1091 source /setup_export_logs.sh # shellcheck source=../stateless/stress_tests.lib -source /stress_tests.lib +source /repo/tests/docker_scripts/stress_tests.lib # Avoid overlaps with previous runs dmesg --clear @@ -12,9 +15,6 @@ dmesg --clear # fail on errors, verbose and export all env variables set -e -x -a -MAX_RUN_TIME=${MAX_RUN_TIME:-9000} -MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 
9000 : MAX_RUN_TIME)) - USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0} USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0} @@ -42,20 +42,22 @@ if [[ -z "$BUGFIX_VALIDATE_CHECK" ]]; then chc --version || exit 1 fi -ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +ln -sf /repo/tests/clickhouse-test /usr/bin/clickhouse-test + +export CLICKHOUSE_GRPC_CLIENT="/repo/utils/grpc-client/clickhouse-grpc-client.py" # shellcheck disable=SC1091 -source /attach_gdb.lib +source /repo/tests/docker_scripts/attach_gdb.lib # shellcheck disable=SC1091 -source /utils.lib +source /repo/tests/docker_scripts/utils.lib # install test configs -/usr/share/clickhouse-test/config/install.sh +/repo/tests/config/install.sh -./setup_minio.sh stateless +/repo/tests/docker_scripts/setup_minio.sh stateless -./setup_hdfs_minicluster.sh +/repo/tests/docker_scripts/setup_hdfs_minicluster.sh config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml @@ -308,8 +310,6 @@ function run_tests() try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')" - TIMEOUT=$((MAX_RUN_TIME - 800 > 8400 ? 8400 : MAX_RUN_TIME - 800)) - START_TIME=${SECONDS} set +e TEST_ARGS=( @@ -321,42 +321,33 @@ function run_tests() --print-time --no-drop-if-fail --capture-client-stacktrace + --queries "/repo/tests/queries" --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" ) - timeout --preserve-status --signal TERM --kill-after 60m ${TIMEOUT}s clickhouse-test "${TEST_ARGS[@]}" 2>&1 \ + clickhouse-test "${TEST_ARGS[@]}" 2>&1 \ | ts '%Y-%m-%d %H:%M:%S' \ | tee -a test_output/test_result.txt set -e - DURATION=$((SECONDS - START_TIME)) - - echo "Elapsed ${DURATION} seconds." - if [[ $DURATION -ge $TIMEOUT ]] - then - echo "It looks like the command is terminated by the timeout, which is ${TIMEOUT} seconds." - fi } export -f run_tests - -# This should be enough to setup job and collect artifacts -TIMEOUT=$((MAX_RUN_TIME - 700)) if [ "$NUM_TRIES" -gt "1" ]; then # We don't run tests with Ordinary database in PRs, only in master. # So run new/changed tests with Ordinary at least once in flaky check. 
-    timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \ +    NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests \ | sed 's/All tests have finished/Redacted: a message about tests finish is deleted/' | sed 's/No tests were run/Redacted: a message about no tests run is deleted/' ||: fi -timeout_with_logging "$TIMEOUT" bash -c run_tests ||: +run_tests ||: echo "Files in current directory" ls -la ./ echo "Files in root directory" ls -la / -/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv +/repo/tests/docker_scripts/process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv clickhouse-client -q "system flush logs" ||: @@ -391,8 +382,8 @@ done # wait for minio to flush its batch if it has any sleep 1 clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" -clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_bytes 0 --max_result_rows 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? diff --git a/docker/test/stress/run.sh b/tests/docker_scripts/stress_runner.sh old mode 100644 new mode 100755 similarity index 97% rename from docker/test/stress/run.sh rename to tests/docker_scripts/stress_runner.sh index b21114e456f..7666398e10b --- a/docker/test/stress/run.sh +++ b/tests/docker_scripts/stress_runner.sh @@ -3,26 +3,25 @@ # shellcheck disable=SC2086 # shellcheck disable=SC2024 +set -x + # Avoid overlaps with previous runs dmesg --clear # shellcheck disable=SC1091 source /setup_export_logs.sh -set -x - -# we mount tests folder from repo to /usr/share -ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress -ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test +ln -s /repo/tests/ci/stress.py /usr/bin/stress +ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test # Stress tests and upgrade check uses similar code that was placed # in a separate bash library. 
See tests/ci/stress_tests.lib # shellcheck source=../stateless/attach_gdb.lib -source /attach_gdb.lib +source /repo/tests/docker_scripts/attach_gdb.lib # shellcheck source=../stateless/stress_tests.lib -source /stress_tests.lib +source /repo/tests/docker_scripts/stress_tests.lib # shellcheck disable=SC1091 -source /utils.lib +source /repo/tests/docker_scripts/utils.lib install_packages package_folder @@ -55,7 +54,7 @@ export ZOOKEEPER_FAULT_INJECTION=1 # available for dump via clickhouse-local configure -./setup_minio.sh stateless # to have a proper environment +/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml @@ -64,7 +63,7 @@ start_server setup_logs_replication clickhouse-client --query "CREATE DATABASE datasets" -clickhouse-client --multiquery < create.sql +clickhouse-client --multiquery < /repo/tests/docker_scripts/create.sql clickhouse-client --query "SHOW TABLES FROM datasets" clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" @@ -267,7 +266,7 @@ fi start_server -stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \ +python3 /repo/tests/ci/stress.py --hung-check --drop-databases --output-folder /test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \ && echo -e "Test script exit code$OK" >> /test_output/test_results.tsv \ || echo -e "Test script failed$FAIL script exit code: $?" >> /test_output/test_results.tsv diff --git a/docker/test/stateless/stress_tests.lib b/tests/docker_scripts/stress_tests.lib similarity index 98% rename from docker/test/stateless/stress_tests.lib rename to tests/docker_scripts/stress_tests.lib index 51aa299f7a6..4f3e6eeb2f4 100644 --- a/docker/test/stateless/stress_tests.lib +++ b/tests/docker_scripts/stress_tests.lib @@ -42,7 +42,7 @@ function configure() # install test configs export USE_DATABASE_ORDINARY=1 export EXPORT_S3_STORAGE_POLICIES=1 - /usr/share/clickhouse-test/config/install.sh + /repo/tests/config/install.sh # avoid too slow startup sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \ @@ -273,7 +273,7 @@ function check_logs_for_critical_errors() [ -s /test_output/no_such_key_errors.txt ] || rm /test_output/no_such_key_errors.txt # Crash - rg -Fa "########################################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \ + rg -Fa "###################""#####################" /var/log/clickhouse-server/clickhouse-server*.log > /dev/null \ && echo -e "Killed by signal (in clickhouse-server.log)$FAIL" >> /test_output/test_results.tsv \ || echo -e "Not crashed$OK" >> /test_output/test_results.tsv @@ -285,7 +285,7 @@ function check_logs_for_critical_errors() # Remove file fatal_messages.txt if it's empty [ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt - rg -Faz "########################################" /test_output/* > /dev/null \ + rg -Faz "####################""####################" /test_output/* > /dev/null \ && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv function get_gdb_log_context() diff --git a/docker/test/upgrade/run.sh b/tests/docker_scripts/upgrade_runner.sh old mode 100644 new mode 100755 similarity index 93% rename from docker/test/upgrade/run.sh rename to tests/docker_scripts/upgrade_runner.sh index a4c4c75e5b3..ece75ebf782 --- a/docker/test/upgrade/run.sh +++ b/tests/docker_scripts/upgrade_runner.sh @@ -9,20 
+9,20 @@ dmesg --clear set -x # we mount tests folder from repo to /usr/share -ln -s /usr/share/clickhouse-test/ci/stress.py /usr/bin/stress -ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test -ln -s /usr/share/clickhouse-test/ci/download_release_packages.py /usr/bin/download_release_packages -ln -s /usr/share/clickhouse-test/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag +ln -s /repo/tests/ci/stress.py /usr/bin/stress +ln -s /repo/tests/clickhouse-test /usr/bin/clickhouse-test +ln -s /repo/tests/ci/download_release_packages.py /usr/bin/download_release_packages +ln -s /repo/tests/ci/get_previous_release_tag.py /usr/bin/get_previous_release_tag # Stress tests and upgrade check uses similar code that was placed # in a separate bash library. See tests/ci/stress_tests.lib # shellcheck source=../stateless/attach_gdb.lib -source /attach_gdb.lib +source /repo/tests/docker_scripts/attach_gdb.lib # shellcheck source=../stateless/stress_tests.lib -source /stress_tests.lib +source /repo/tests/docker_scripts/stress_tests.lib azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log & -./setup_minio.sh stateless # to have a proper environment +/repo/tests/docker_scripts/setup_minio.sh stateless # to have a proper environment echo "Get previous release tag" # shellcheck disable=SC2016 @@ -129,6 +129,7 @@ configure # Check that all new/changed setting were added in settings changes history. # Some settings can be different for builds with sanitizers, so we check +# Also the automatic value of 'max_threads' and similar was displayed as "'auto(...)'" in previous versions instead of "auto(...)". # settings changes only for non-sanitizer builds. IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM system.build_options WHERE name = 'CXX_FLAGS'") if [ "${IS_SANITIZED}" -eq "0" ] @@ -145,7 +146,9 @@ then old_settings.value AS old_value FROM new_settings LEFT JOIN old_settings ON new_settings.name = old_settings.name - WHERE (new_settings.value != old_settings.value) AND (name NOT IN ( + WHERE (new_value != old_value) + AND NOT (startsWith(new_value, 'auto(') AND old_value LIKE '%auto(%') + AND (name NOT IN ( SELECT arrayJoin(tupleElement(changes, 'name')) FROM ( @@ -177,7 +180,7 @@ then if [ -s changed_settings.txt ] then mv changed_settings.txt /test_output/ - echo -e "Changed settings are not reflected in settings changes history (see changed_settings.txt)$FAIL$(head_escaped /test_output/changed_settings.txt)" >> /test_output/test_results.tsv + echo -e "Changed settings are not reflected in the settings changes history (see changed_settings.txt)$FAIL$(head_escaped /test_output/changed_settings.txt)" >> /test_output/test_results.tsv else echo -e "There are no changed settings or they are reflected in settings changes history$OK" >> /test_output/test_results.tsv fi diff --git a/docker/test/stateless/utils.lib b/tests/docker_scripts/utils.lib similarity index 69% rename from docker/test/stateless/utils.lib rename to tests/docker_scripts/utils.lib index cb257536c36..31cd67254b4 100644 --- a/docker/test/stateless/utils.lib +++ b/tests/docker_scripts/utils.lib @@ -40,22 +40,6 @@ function fn_exists() { declare -F "$1" > /dev/null; } -function timeout_with_logging() { - local exit_code=0 - - timeout -s TERM --preserve-status "${@}" || exit_code="${?}" - - echo "Checking if it is a timeout. The code 124 will indicate a timeout." - if [[ "${exit_code}" -eq "124" ]] - then - echo "The command 'timeout ${*}' has been killed by timeout." 
- else - echo "No, it isn't a timeout." - fi - - return $exit_code -} - function collect_core_dumps() { find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 215718463e8..53f4f1e1f26 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -570,6 +570,8 @@ class ClickHouseCluster: self.spark_session = None self.with_azurite = False + self.azurite_container = "azurite-container" + self.blob_service_client = None self._azurite_port = 0 # available when with_hdfs == True @@ -2692,6 +2694,32 @@ class ClickHouseCluster: connection_string ) logging.debug(blob_service_client.get_account_information()) + containers = [ + c + for c in blob_service_client.list_containers( + name_starts_with=self.azurite_container + ) + if c.name == self.azurite_container + ] + if len(containers) > 0: + for c in containers: + blob_service_client.delete_container(c) + + container_client = blob_service_client.get_container_client( + self.azurite_container + ) + if container_client.exists(): + logging.debug( + f"azurite container '{self.azurite_container}' exist, deleting all blobs" + ) + for b in container_client.list_blobs(): + container_client.delete_blob(b.name) + else: + logging.debug( + f"azurite container '{self.azurite_container}' doesn't exist, creating it" + ) + container_client.create_container() + self.blob_service_client = blob_service_client return except Exception as ex: diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index fca2126d824..507894534d4 100644 --- a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -1,6 +1,7 @@ [ "test_dns_cache/test.py::test_dns_cache_update", "test_dns_cache/test.py::test_ip_change_drop_dns_cache", + "test_dns_cache/test.py::test_dns_resolver_filter", "test_dns_cache/test.py::test_ip_change_update_dns_cache", "test_dns_cache/test.py::test_user_access_ip_change[node0]", "test_dns_cache/test.py::test_user_access_ip_change[node1]", diff --git a/tests/integration/test_async_metrics_in_cgroup/__init__.py b/tests/integration/test_async_metrics_in_cgroup/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py new file mode 100644 index 00000000000..d9f2e3aaaed --- /dev/null +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -0,0 +1,69 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance("node1", stay_alive=True) +node2 = cluster.add_instance("node2", stay_alive=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def run_cpu_intensive_task(node): + node.query( + "SELECT sum(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=10", + ignore_error=True, + ) + + +def get_async_metric(node, metric): + node.query("SYSTEM FLUSH LOGS") + return node.query( + f""" + SELECT max(value) + FROM ( + SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = '{metric}' + GROUP BY t + ) + SETTINGS max_threads = 1 + """ + ).strip("\n") + + +def test_user_cpu_accounting(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled 
for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTime") + assert float(node1_cpu_time) < 2 + + # then let's test that we will account cpu time spent by the server itself + node2_cpu_time = get_async_metric(node2, "OSUserTime") + # this check is really weak, but CI is tough place and we cannot guarantee that test process will get many cpu time + assert float(node2_cpu_time) > 2 + + +def test_normalized_user_cpu(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTimeNormalized") + assert float(node1_cpu_time) < 1.01 + + node2_cpu_time = get_async_metric(node2, "OSUserTimeNormalized") + assert float(node2_cpu_time) < 1.01 diff --git a/tests/integration/test_cgroup_limit/test.py b/tests/integration/test_cgroup_limit/test.py index e77b0f70960..5d56135d9ff 100644 --- a/tests/integration/test_cgroup_limit/test.py +++ b/tests/integration/test_cgroup_limit/test.py @@ -46,7 +46,7 @@ def test_cgroup_cpu_limit(): "clickhouse local -q \"select value from system.settings where name='max_threads'\"", num_cpus, ) - expect_output = (r"\'auto({})\'".format(math.ceil(num_cpus))).encode() + expect_output = (r"auto({})".format(math.ceil(num_cpus))).encode() assert ( result.strip() == expect_output ), f"fail for cpu limit={num_cpus}, result={result.strip()}, expect={expect_output}" diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 73f2888ce00..76a0f30f82e 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -708,7 +708,7 @@ def test_no_key_found_disk(cluster, broken_s3): """ SELECT value FROM system.metrics - WHERE metric = 'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index a480ee3f278..f1034e26b25 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -20,21 +20,30 @@ node_1_2 = cluster.add_instance("node_1_2", with_zookeeper=True) node_2_1 = cluster.add_instance("node_2_1", with_zookeeper=True) node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True) +# For test to be runnable multiple times +seqno = 0 + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() + yield cluster + finally: + cluster.shutdown() + +@pytest.fixture(scope="function", autouse=True) +def create_tables(): + global seqno + try: + seqno += 1 for shard in (1, 2): for replica in (1, 2): node = cluster.instances["node_{}_{}".format(shard, replica)] node.query( - """ -CREATE TABLE replicated (d Date, x UInt32) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}') PARTITION BY toYYYYMM(d) ORDER BY d""".format( - shard=shard, instance=node.name - ) + f"CREATE TABLE replicated (d Date, x UInt32) ENGINE = " + f"ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated_{seqno}', '{node.name}') PARTITION BY toYYYYMM(d) ORDER BY d" ) node_1_1.query( @@ -42,10 +51,15 @@ CREATE TABLE replicated (d Date, x UInt32) ENGINE = "Distributed('test_cluster', 'default', 
'replicated')" ) - yield cluster + yield finally: - cluster.shutdown() + node_1_1.query("DROP TABLE distributed") + + node_1_1.query("DROP TABLE replicated") + node_1_2.query("DROP TABLE replicated") + node_2_1.query("DROP TABLE replicated") + node_2_2.query("DROP TABLE replicated") def test(started_cluster): @@ -101,7 +115,9 @@ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS # allow pings to zookeeper to timeout (must be greater than ZK session timeout). for _ in range(30): try: - node_2_2.query("SELECT * FROM system.zookeeper where path = '/'") + node_2_2.query( + "SELECT * FROM system.zookeeper where path = '/' SETTINGS insert_keeper_max_retries = 0" + ) time.sleep(0.5) except: break @@ -120,7 +136,7 @@ SELECT sum(x) FROM distributed SETTINGS == "3" ) - # Regression for skip_unavailable_shards in conjunction with skip_unavailable_shards + # Prefer fallback_to_stale_replicas over skip_unavailable_shards assert ( instance_with_dist_table.query( """ diff --git a/tests/integration/test_dictionaries_postgresql/test.py b/tests/integration/test_dictionaries_postgresql/test.py index 516ac27ea26..010ecdb5084 100644 --- a/tests/integration/test_dictionaries_postgresql/test.py +++ b/tests/integration/test_dictionaries_postgresql/test.py @@ -530,10 +530,61 @@ def test_bad_configuration(started_cluster): """ ) - node1.query_and_get_error( + assert "Unexpected key `dbbb`" in node1.query_and_get_error( "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(1))" ) - assert node1.contains_in_log("Unexpected key `dbbb`") + + +def test_named_collection_from_ddl(started_cluster): + cursor = started_cluster.postgres_conn.cursor() + cursor.execute("DROP TABLE IF EXISTS test_table") + cursor.execute("CREATE TABLE test_table (id integer, value integer)") + + node1.query( + """ + DROP NAMED COLLECTION IF EXISTS pg_conn; + CREATE NAMED COLLECTION pg_conn + AS user = 'postgres', password = 'mysecretpassword', host = 'postgres1', port = 5432, database = 'postgres', table = 'test_table'; + """ + ) + + cursor.execute( + "INSERT INTO test_table SELECT i, i FROM generate_series(0, 99) as t(i)" + ) + + node1.query( + """ + DROP DICTIONARY IF EXISTS postgres_dict; + CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) + PRIMARY KEY id + SOURCE(POSTGRESQL(NAME pg_conn)) + LIFETIME(MIN 1 MAX 2) + LAYOUT(HASHED()); + """ + ) + result = node1.query("SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))") + assert int(result.strip()) == 99 + + node1.query( + """ + DROP NAMED COLLECTION IF EXISTS pg_conn_2; + CREATE NAMED COLLECTION pg_conn_2 + AS user = 'postgres', password = 'mysecretpassword', host = 'postgres1', port = 5432, dbbb = 'postgres', table = 'test_table'; + """ + ) + node1.query( + """ + DROP DICTIONARY IF EXISTS postgres_dict; + CREATE DICTIONARY postgres_dict (id UInt32, value UInt32) + PRIMARY KEY id + SOURCE(POSTGRESQL(NAME pg_conn_2)) + LIFETIME(MIN 1 MAX 2) + LAYOUT(HASHED()); + """ + ) + assert "Unexpected key `dbbb`" in node1.query_and_get_error( + "SELECT dictGetUInt32(postgres_dict, 'value', toUInt64(99))" + ) if __name__ == "__main__": diff --git a/tests/integration/test_distributed_type_object/test.py b/tests/integration/test_distributed_type_object/test.py index e274bd6b774..64acdda887b 100644 --- a/tests/integration/test_distributed_type_object/test.py +++ b/tests/integration/test_distributed_type_object/test.py @@ -16,7 +16,7 @@ def started_cluster(): for node in (node1, node2): node.query( - "CREATE TABLE local_table(id UInt32, data JSON) ENGINE = MergeTree ORDER BY id", + "CREATE 
TABLE local_table(id UInt32, data Object('json')) ENGINE = MergeTree ORDER BY id", settings={"allow_experimental_object_type": 1}, ) node.query( diff --git a/tests/integration/test_dns_cache/test.py b/tests/integration/test_dns_cache/test.py index a6db26c8575..36401517429 100644 --- a/tests/integration/test_dns_cache/test.py +++ b/tests/integration/test_dns_cache/test.py @@ -317,3 +317,74 @@ def test_host_is_drop_from_cache_after_consecutive_failures( assert node4.wait_for_log_line( "Cached hosts dropped:.*InvalidHostThatDoesNotExist.*" ) + + +node7 = cluster.add_instance( + "node7", + main_configs=["configs/listen_host.xml", "configs/dns_update_long.xml"], + with_zookeeper=True, + ipv6_address="2001:3984:3989::1:1117", + ipv4_address="10.5.95.17", +) + + +def _render_filter_config(allow_ipv4, allow_ipv6): + config = f""" + + {int(allow_ipv4)} + {int(allow_ipv6)} + + """ + return config + + +@pytest.mark.parametrize( + "allow_ipv4, allow_ipv6", + [ + (True, False), + (False, True), + (False, False), + ], +) +def test_dns_resolver_filter(cluster_without_dns_cache_update, allow_ipv4, allow_ipv6): + node = node7 + host_ipv6 = node.ipv6_address + host_ipv4 = node.ipv4_address + + node.set_hosts( + [ + (host_ipv6, "test_host"), + (host_ipv4, "test_host"), + ] + ) + node.replace_config( + "/etc/clickhouse-server/config.d/dns_filter.xml", + _render_filter_config(allow_ipv4, allow_ipv6), + ) + + node.query("SYSTEM RELOAD CONFIG") + node.query("SYSTEM DROP DNS CACHE") + node.query("SYSTEM DROP CONNECTIONS CACHE") + + if not allow_ipv4 and not allow_ipv6: + with pytest.raises(QueryRuntimeException): + node.query("SELECT * FROM remote('lost_host', 'system', 'one')") + else: + node.query("SELECT * FROM remote('test_host', system, one)") + assert ( + node.query( + "SELECT ip_address FROM system.dns_cache WHERE hostname='test_host'" + ) + == f"{host_ipv4 if allow_ipv4 else host_ipv6}\n" + ) + + node.exec_in_container( + [ + "bash", + "-c", + "rm /etc/clickhouse-server/config.d/dns_filter.xml", + ], + privileged=True, + user="root", + ) + node.query("SYSTEM RELOAD CONFIG") diff --git a/tests/integration/test_filesystem_cache/test.py b/tests/integration/test_filesystem_cache/test.py index 17a8dd8b6e1..aee8bd25c2e 100644 --- a/tests/integration/test_filesystem_cache/test.py +++ b/tests/integration/test_filesystem_cache/test.py @@ -1,6 +1,7 @@ import logging import time import os +import random import pytest from helpers.cluster import ClickHouseCluster @@ -30,14 +31,6 @@ def cluster(): "config.d/storage_conf_2.xml", ], ) - cluster.add_instance( - "node_no_filesystem_caches_path", - main_configs=[ - "config.d/storage_conf.xml", - "config.d/remove_filesystem_caches_path.xml", - ], - stay_alive=True, - ) cluster.add_instance( "node_force_read_through_cache_on_merge", main_configs=[ @@ -59,6 +52,51 @@ def cluster(): cluster.shutdown() +@pytest.fixture(scope="function") +def non_shared_cluster(): + """ + For tests that cannot run in parallel against the same node/cluster (see test_custom_cached_disk, which relies on + changing server settings at runtime) + """ + try: + # Randomize the cluster name + cluster = ClickHouseCluster(f"{__file__}_non_shared_{random.randint(0, 10**7)}") + cluster.add_instance( + "node_no_filesystem_caches_path", + main_configs=[ + "config.d/storage_conf.xml", + "config.d/remove_filesystem_caches_path.xml", + ], + stay_alive=True, + ) + + logging.info("Starting test-exclusive cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + 
cluster.shutdown() + + +def wait_for_cache_initialized(node, cache_path, max_attempts=50): + initialized = False + attempts = 0 + while not initialized: + query_result = node.query( + "SELECT path FROM system.filesystem_cache_settings WHERE is_initialized" + ) + initialized = cache_path in query_result + + if initialized: + break + + time.sleep(0.1) + attempts += 1 + if attempts >= max_attempts: + raise RuntimeError("Stopped waiting for cache to be initialized") + + @pytest.mark.parametrize("node_name", ["node"]) def test_parallel_cache_loading_on_startup(cluster, node_name): node = cluster.instances[node_name] @@ -71,14 +109,21 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): ORDER BY value SETTINGS disk = disk( type = cache, - path = 'paralel_loading_test', + name = 'parallel_loading_test', + path = 'parallel_loading_test', disk = 'hdd_blob', max_file_segment_size = '1Ki', boundary_alignment = '1Ki', max_size = '1Gi', max_elements = 10000000, load_metadata_threads = 30); + """ + ) + wait_for_cache_initialized(node, "parallel_loading_test") + + node.query( + """ SYSTEM DROP FILESYSTEM CACHE; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; SELECT * FROM test FORMAT Null; @@ -103,6 +148,7 @@ def test_parallel_cache_loading_on_startup(cluster, node_name): ) node.restart_clickhouse() + wait_for_cache_initialized(node, "parallel_loading_test") # < because of additional files loaded into cache on server startup. assert cache_count <= int(node.query("SELECT count() FROM system.filesystem_cache")) @@ -131,7 +177,7 @@ def test_caches_with_the_same_configuration(cluster, node_name): node = cluster.instances[node_name] cache_path = "cache1" - node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + node.query("SYSTEM DROP FILESYSTEM CACHE;") for table in ["test", "test2"]: node.query( f""" @@ -142,14 +188,20 @@ def test_caches_with_the_same_configuration(cluster, node_name): ORDER BY value SETTINGS disk = disk( type = cache, - name = {table}, + name = '{table}', path = '{cache_path}', disk = 'hdd_blob', max_file_segment_size = '1Ki', boundary_alignment = '1Ki', cache_on_write_operations=1, max_size = '1Mi'); + """ + ) + wait_for_cache_initialized(node, cache_path) + + node.query( + f""" SET enable_filesystem_cache_on_write_operations=1; INSERT INTO {table} SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000; @@ -195,9 +247,8 @@ def test_caches_with_the_same_configuration(cluster, node_name): @pytest.mark.parametrize("node_name", ["node_caches_with_same_path"]) def test_caches_with_the_same_configuration_2(cluster, node_name): node = cluster.instances[node_name] - cache_path = "cache1" - node.query(f"SYSTEM DROP FILESYSTEM CACHE;") + node.query("SYSTEM DROP FILESYSTEM CACHE;") for table in ["cache1", "cache2"]: node.query( f""" @@ -207,7 +258,13 @@ def test_caches_with_the_same_configuration_2(cluster, node_name): Engine=MergeTree() ORDER BY value SETTINGS disk = '{table}'; + """ + ) + wait_for_cache_initialized(node, "cache1") + + node.query( + f""" SET enable_filesystem_cache_on_write_operations=1; INSERT INTO {table} SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000; @@ -227,8 +284,8 @@ def test_caches_with_the_same_configuration_2(cluster, node_name): ) -def test_custom_cached_disk(cluster): - node = cluster.instances["node_no_filesystem_caches_path"] +def test_custom_cached_disk(non_shared_cluster): + node = non_shared_cluster.instances["node_no_filesystem_caches_path"] assert "Cannot create cached custom disk without" in node.query_and_get_error(
f""" @@ -377,6 +434,7 @@ def test_force_filesystem_cache_on_merges(cluster): ORDER BY value SETTINGS disk = disk( type = cache, + name = 'force_cache_on_merges', path = 'force_cache_on_merges', disk = 'hdd_blob', max_file_segment_size = '1Ki', @@ -385,7 +443,13 @@ def test_force_filesystem_cache_on_merges(cluster): max_size = '10Gi', max_elements = 10000000, load_metadata_threads = 30); + """ + ) + wait_for_cache_initialized(node, "force_cache_on_merges") + + node.query( + """ SYSTEM DROP FILESYSTEM CACHE; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; INSERT INTO test SELECT * FROM generateRandom('a Int32, b String') LIMIT 1000000; @@ -441,7 +505,13 @@ SETTINGS disk = disk(type = cache, path = "test_system_sync_filesystem_cache", delayed_cleanup_interval_ms = 10000000, disk = hdd_blob), min_bytes_for_wide_part = 10485760; + """ + ) + wait_for_cache_initialized(node, "test_system_sync_filesystem_cache") + + node.query( + """ INSERT INTO test SELECT 1, 'test'; """ ) @@ -525,7 +595,13 @@ SETTINGS disk = disk(type = cache, keep_free_space_elements_ratio = {elements_ratio}, disk = hdd_blob), min_bytes_for_wide_part = 10485760; + """ + ) + wait_for_cache_initialized(node, "test_keep_up_size_ratio") + + node.query( + """ INSERT INTO test SELECT randomString(200); """ ) diff --git a/tests/integration/test_hedged_requests/test.py b/tests/integration/test_hedged_requests/test.py index 02ecf3c1367..0d72f7c45b1 100644 --- a/tests/integration/test_hedged_requests/test.py +++ b/tests/integration/test_hedged_requests/test.py @@ -333,7 +333,7 @@ def test_receive_timeout2(started_cluster): # in packet receiving but there are replicas in process of # connection establishing. update_configs( - node_1_sleep_in_send_data=4000, + node_1_sleep_in_send_data=5000, node_2_sleep_in_send_tables_status=2000, node_3_sleep_in_send_tables_status=2000, ) diff --git a/tests/integration/test_incorrect_datetime_format/__init__.py b/tests/integration/test_incorrect_datetime_format/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_incorrect_datetime_format/configs/config.d/cluster.xml b/tests/integration/test_incorrect_datetime_format/configs/config.d/cluster.xml new file mode 100644 index 00000000000..a27968fb3d2 --- /dev/null +++ b/tests/integration/test_incorrect_datetime_format/configs/config.d/cluster.xml @@ -0,0 +1,11 @@ + + + + + + node + + + + + diff --git a/tests/integration/test_incorrect_datetime_format/configs/config.xml b/tests/integration/test_incorrect_datetime_format/configs/config.xml new file mode 100644 index 00000000000..053b5d30418 --- /dev/null +++ b/tests/integration/test_incorrect_datetime_format/configs/config.xml @@ -0,0 +1,9 @@ + + + information + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + diff --git a/tests/integration/test_incorrect_datetime_format/test.py b/tests/integration/test_incorrect_datetime_format/test.py new file mode 100644 index 00000000000..3cdc6781534 --- /dev/null +++ b/tests/integration/test_incorrect_datetime_format/test.py @@ -0,0 +1,54 @@ +import logging +import pytest +from helpers.cluster import ClickHouseCluster + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node", + main_configs=[ + "configs/config.d/cluster.xml", + ], + ) + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + node = 
cluster.instances["node"] + node.query( + """ + CREATE TABLE tab + ( + a DateTime, + pk String + ) Engine = MergeTree() ORDER BY pk; + """ + ) + + yield cluster + finally: + cluster.shutdown() + + +def test_incorrect_datetime_format(cluster): + """ + Test for an MSan issue which is caused by parsing incorrect datetime string + """ + + node = cluster.instances["node"] + + res = node.query("SELECT count(*) FROM tab WHERE a = '2024-08-06 09:58:09'").strip() + assert res == "0" + + error = node.query_and_get_error( + "SELECT count(*) FROM tab WHERE a = '2024-08-06 09:58:0'" + ).strip() + assert "Cannot parse time component of DateTime 09:58:0" in error + + error = node.query_and_get_error( + "SELECT count(*) FROM tab WHERE a = '2024-08-0 09:58:09'" + ).strip() + assert "Cannot convert string '2024-08-0 09:58:09' to type DateTime" in error diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index 6f6dc4d287f..8d5345082ff 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ b/tests/integration/test_mask_sensitive_info/test.py @@ -202,6 +202,10 @@ def test_create_table(): f"S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip') settings mode = 'ordered'", f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV') settings mode = 'ordered'", f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV', 'gzip') settings mode = 'ordered'", + ( + f"Iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", + "DNS_ERROR", + ), ] def make_test_case(i): @@ -266,6 +270,7 @@ def test_create_table(): # due to sensitive data substituion the query will be normalized, so not "settings" but "SETTINGS" "CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV') SETTINGS mode = 'ordered'", "CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip') SETTINGS mode = 'ordered'", + "CREATE TABLE table21 (`x` int) ENGINE = Iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", ], must_not_contain=[password], ) @@ -387,6 +392,7 @@ def test_table_functions(): f"azureBlobStorageCluster('test_shard_localhost', '{azure_storage_account_url}', 'cont', 'test_simple_15.csv', '{azure_account_name}', '{azure_account_key}', 'CSV', 'none', 'auto')", f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')", f"azureBlobStorageCluster('test_shard_localhost', named_collection_2, storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '{azure_account_key}')", + f"iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')", ] def make_test_case(i): @@ -478,6 +484,7 @@ def test_table_functions(): f"CREATE TABLE tablefunc48 (`x` int) AS azureBlobStorageCluster('test_shard_localhost', '{azure_storage_account_url}', 'cont', 'test_simple_15.csv', '{azure_account_name}', '[HIDDEN]', 'CSV', 'none', 'auto')", f"CREATE TABLE tablefunc49 (x int) AS azureBlobStorageCluster('test_shard_localhost', named_collection_2, connection_string = '{azure_conn_string}', container = 'cont', blob_path = 'test_simple_16.csv', format = 'CSV')", f"CREATE TABLE tablefunc50 (`x` int) AS azureBlobStorageCluster('test_shard_localhost', named_collection_2, 
storage_account_url = '{azure_storage_account_url}', container = 'cont', blob_path = 'test_simple_17.csv', account_name = '{azure_account_name}', account_key = '[HIDDEN]')", + "CREATE TABLE tablefunc51 (`x` int) AS iceberg('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", ], must_not_contain=[password], ) diff --git a/tests/integration/test_named_collections_encrypted/__init__.py b/tests/integration/test_named_collections_encrypted/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_encrypted.xml b/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_encrypted.xml new file mode 100644 index 00000000000..233e23846cb --- /dev/null +++ b/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_encrypted.xml @@ -0,0 +1,12 @@ + + + local_encrypted + bebec0cabebec0cabebec0cabebec0ca + + + + + value1 + + + diff --git a/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_with_zookeeper_encrypted.xml b/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_with_zookeeper_encrypted.xml new file mode 100644 index 00000000000..d1dd5c29787 --- /dev/null +++ b/tests/integration/test_named_collections_encrypted/configs/config.d/named_collections_with_zookeeper_encrypted.xml @@ -0,0 +1,31 @@ + + + zookeeper_encrypted + bebec0cabebec0cabebec0cabebec0ca + /named_collections_path/ + 5000 + + + + + value1 + + + + + + + true + + node_with_keeper + 9000 + + + node_with_keeper_2 + 9000 + + + true + + + diff --git a/tests/integration/test_named_collections_encrypted/configs/users.d/users.xml b/tests/integration/test_named_collections_encrypted/configs/users.d/users.xml new file mode 100644 index 00000000000..7d4f0543ff1 --- /dev/null +++ b/tests/integration/test_named_collections_encrypted/configs/users.d/users.xml @@ -0,0 +1,17 @@ + + + + 0 + + + + + + default + default + 1 + 1 + 1 + + + diff --git a/tests/integration/test_named_collections_encrypted/test.py b/tests/integration/test_named_collections_encrypted/test.py new file mode 100644 index 00000000000..7dff32fa6c9 --- /dev/null +++ b/tests/integration/test_named_collections_encrypted/test.py @@ -0,0 +1,123 @@ +import logging +import pytest +import os +from helpers.cluster import ClickHouseCluster + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +NAMED_COLLECTIONS_CONFIG = os.path.join( + SCRIPT_DIR, "./configs/config.d/named_collections.xml" +) + +ZK_PATH = "/named_collections_path" + + +@pytest.fixture(scope="module") +def cluster(): + try: + cluster = ClickHouseCluster(__file__) + cluster.add_instance( + "node_encrypted", + main_configs=[ + "configs/config.d/named_collections_encrypted.xml", + ], + user_configs=[ + "configs/users.d/users.xml", + ], + stay_alive=True, + ) + cluster.add_instance( + "node_with_keeper_encrypted", + main_configs=[ + "configs/config.d/named_collections_with_zookeeper_encrypted.xml", + ], + user_configs=[ + "configs/users.d/users.xml", + ], + stay_alive=True, + with_zookeeper=True, + ) + cluster.add_instance( + "node_with_keeper_2_encrypted", + main_configs=[ + "configs/config.d/named_collections_with_zookeeper_encrypted.xml", + ], + user_configs=[ + "configs/users.d/users.xml", + ], + stay_alive=True, + with_zookeeper=True, + ) + + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + 
cluster.shutdown() + + +def check_encrypted_content(node, zk=None): + assert ( + "collection1\ncollection2" + == node.query("select name from system.named_collections").strip() + ) + + assert ( + "['key1','key2']" + == node.query( + "select mapKeys(collection) from system.named_collections where name = 'collection2'" + ).strip() + ) + + assert ( + "1234\tvalue2" + == node.query( + "select collection['key1'], collection['key2'] from system.named_collections where name = 'collection2'" + ).strip() + ) + + # Check that the underlying storage is encrypted + content = ( + zk.get(ZK_PATH + "/collection2.sql")[0] + if zk is not None + else open( + f"{node.path}/database/named_collections/collection2.sql", "rb" + ).read() + ) + + assert ( + content[0:3] == b"ENC" + ) # file signature (aka magic number) of the encrypted file + assert b"key1" not in content + assert b"1234" not in content + assert b"key2" not in content + assert b"value2" not in content + + +def test_local_storage_encrypted(cluster): + node = cluster.instances["node_encrypted"] + node.query("CREATE NAMED COLLECTION collection2 AS key1=1234, key2='value2'") + + check_encrypted_content(node) + node.restart_clickhouse() + check_encrypted_content(node) + + node.query("DROP NAMED COLLECTION collection2") + + +def test_zookeper_storage_encrypted(cluster): + node1 = cluster.instances["node_with_keeper_encrypted"] + node2 = cluster.instances["node_with_keeper_2_encrypted"] + zk = cluster.get_kazoo_client("zoo1") + + node1.query("CREATE NAMED COLLECTION collection2 AS key1=1234, key2='value2'") + + check_encrypted_content(node1, zk) + check_encrypted_content(node2, zk) + node1.restart_clickhouse() + node2.restart_clickhouse() + check_encrypted_content(node1, zk) + check_encrypted_content(node2, zk) + + node1.query("DROP NAMED COLLECTION collection2") diff --git a/tests/integration/test_odbc_interaction/test.py b/tests/integration/test_odbc_interaction/test.py index 0d0d7a0afb1..9d4ca5ad49f 100644 --- a/tests/integration/test_odbc_interaction/test.py +++ b/tests/integration/test_odbc_interaction/test.py @@ -51,9 +51,9 @@ create_table_sql_nullable_template = """ """ -def skip_test_msan(instance): - if instance.is_built_with_memory_sanitizer(): - pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") +def skip_test_sanitizers(instance): + if instance.is_built_with_sanitizer(): + pytest.skip("Sanitizers cannot work with third-party shared libraries") def get_mysql_conn(): @@ -208,7 +208,7 @@ def started_cluster(): def test_mysql_odbc_select_nullable(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] table_name = "test_insert_nullable_select" @@ -248,7 +248,7 @@ def test_mysql_odbc_select_nullable(started_cluster): def test_mysql_simple_select_works(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] @@ -331,7 +331,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, column_x Nulla def test_mysql_insert(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) mysql_setup = node1.odbc_drivers["MySQL"] table_name = "test_insert" @@ -374,7 +374,7 @@ def test_mysql_insert(started_cluster): def test_sqlite_simple_select_function_works(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -438,7 +438,7 @@ def test_sqlite_simple_select_function_works(started_cluster): def 
test_sqlite_table_function(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -470,7 +470,7 @@ def test_sqlite_table_function(started_cluster): def test_sqlite_simple_select_storage_works(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_setup = node1.odbc_drivers["SQLite3"] sqlite_db = sqlite_setup["Database"] @@ -503,7 +503,7 @@ def test_sqlite_simple_select_storage_works(started_cluster): def test_sqlite_odbc_hashed_dictionary(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] node1.exec_in_container( @@ -586,7 +586,7 @@ def test_sqlite_odbc_hashed_dictionary(started_cluster): def test_sqlite_odbc_cached_dictionary(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) sqlite_db = node1.odbc_drivers["SQLite3"]["Database"] node1.exec_in_container( @@ -635,7 +635,7 @@ def test_sqlite_odbc_cached_dictionary(started_cluster): def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -663,7 +663,7 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster): def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -685,7 +685,7 @@ def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster): def test_no_connection_pooling(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -717,7 +717,7 @@ def test_no_connection_pooling(started_cluster): def test_postgres_insert(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) @@ -754,7 +754,7 @@ def test_postgres_insert(started_cluster): def test_odbc_postgres_date_data_type(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -783,7 +783,7 @@ def test_odbc_postgres_date_data_type(started_cluster): def test_odbc_postgres_conversions(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) try: conn = get_postgres_conn(started_cluster) @@ -841,7 +841,7 @@ def test_odbc_postgres_conversions(started_cluster): def test_odbc_cyrillic_with_varchar(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -868,7 +868,7 @@ def test_odbc_cyrillic_with_varchar(started_cluster): def test_many_connections(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -894,7 +894,7 @@ def test_many_connections(started_cluster): def test_concurrent_queries(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -948,7 +948,7 @@ def test_concurrent_queries(started_cluster): def test_odbc_long_column_names(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = get_postgres_conn(started_cluster) cursor = conn.cursor() @@ -986,7 +986,7 @@ def test_odbc_long_column_names(started_cluster): def test_odbc_long_text(started_cluster): - skip_test_msan(node1) + skip_test_sanitizers(node1) conn = 
get_postgres_conn(started_cluster) cursor = conn.cursor() diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 406b50bc486..7fdd17625a9 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -953,12 +953,14 @@ def test_generated_columns(started_cluster): "", f"""CREATE TABLE {table} ( key integer PRIMARY KEY, - x integer, + x integer DEFAULT 0, + temp integer DEFAULT 0, y integer GENERATED ALWAYS AS (x*2) STORED, - z text); + z text DEFAULT 'z'); """, ) + pg_manager.execute(f"alter table {table} drop column temp;") pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") @@ -991,6 +993,44 @@ def test_generated_columns(started_cluster): ) +def test_generated_columns_with_sequence(started_cluster): + table = "test_generated_columns_with_sequence" + + pg_manager.create_postgres_table( + table, + "", + f"""CREATE TABLE {table} ( + key integer PRIMARY KEY, + x integer, + y integer GENERATED ALWAYS AS (x*2) STORED, + z text); + """, + ) + + pg_manager.execute( + f"create sequence {table}_id_seq increment by 1 minvalue 1 start 1;" + ) + pg_manager.execute( + f"alter table {table} alter key set default nextval('{table}_id_seq');" + ) + pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") + pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") + + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{table}'", + "materialized_postgresql_backoff_min_ms = 100", + "materialized_postgresql_backoff_max_ms = 100", + ], + ) + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + def test_default_columns(started_cluster): table = "test_default_columns" @@ -1087,9 +1127,13 @@ def test_dependent_loading(started_cluster): nested_time = instance.query( f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{uuid}_nested' and message not like '%like%'" ).strip() - time = instance.query( - f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" - ).strip() + time = ( + instance.query( + f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" + ) + .strip() + .split("\n")[-1] + ) instance.query( f"SELECT toDateTime64('{nested_time}', 6) < toDateTime64('{time}', 6)" ) diff --git a/tests/integration/test_s3_imds/test_simple.py b/tests/integration/test_s3_imds/test_simple.py index 0dacac2b0b9..4884c824f99 100644 --- a/tests/integration/test_s3_imds/test_simple.py +++ b/tests/integration/test_s3_imds/test_simple.py @@ -56,7 +56,7 @@ def test_credentials_from_metadata(): ) expected_logs = [ - "Calling EC2MetadataService to get token failed, falling back to less secure way", + "Calling EC2MetadataService to get token failed, falling back to a less secure way", "Getting default credentials for ec2 instance from resolver:8080", "Calling EC2MetadataService resource, /latest/meta-data/iam/security-credentials returned credential string myrole", "Calling EC2MetadataService resource /latest/meta-data/iam/security-credentials/myrole", 
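Editorial note, not part of the patch: the updated log line checked above ("Calling EC2MetadataService to get token failed, falling back to a less secure way") comes from the AWS SDK's instance-metadata flow: it first tries IMDSv2 (session-token based) and only falls back to IMDSv1 if the token request fails. A minimal self-contained Python sketch of that flow, using the standard IMDS endpoint and the hypothetical role name "myrole" from the test, might look roughly like this:

    # Illustrative sketch only; the test itself talks to a mocked resolver, not real IMDS.
    import requests

    IMDS = "http://169.254.169.254"

    def get_instance_role_credentials(role="myrole"):
        headers = {}
        try:
            # IMDSv2: obtain a short-lived session token first
            token = requests.put(
                f"{IMDS}/latest/api/token",
                headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
                timeout=1,
            ).text
            headers["X-aws-ec2-metadata-token"] = token
        except requests.RequestException:
            # "falling back to a less secure way": plain IMDSv1 without a token
            pass
        # Fetch temporary credentials for the instance role
        return requests.get(
            f"{IMDS}/latest/meta-data/iam/security-credentials/{role}",
            headers=headers,
            timeout=1,
        ).json()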
diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 054b79ff6fe..75a4b6cc221 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -464,7 +464,7 @@ def test_restart_broken(started_cluster): """ SELECT value FROM system.metrics - WHERE metric = 'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) @@ -572,7 +572,7 @@ def test_partition_columns(started_cluster): "test" + str(i), datetime.strptime(f"2000-01-0{i}", "%Y-%m-%d"), i, - False, + False if i % 2 == 0 else True, ) ] df = spark.createDataFrame(data=data, schema=schema) @@ -622,15 +622,15 @@ def test_partition_columns(started_cluster): ENGINE=DeltaLake('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{result_file}/', 'minio', 'minio123')""" ) assert ( - """1 test1 2000-01-01 1 false + """1 test1 2000-01-01 1 true 2 test2 2000-01-02 2 false -3 test3 2000-01-03 3 false +3 test3 2000-01-03 3 true 4 test4 2000-01-04 4 false -5 test5 2000-01-05 5 false +5 test5 2000-01-05 5 true 6 test6 2000-01-06 6 false -7 test7 2000-01-07 7 false +7 test7 2000-01-07 7 true 8 test8 2000-01-08 8 false -9 test9 2000-01-09 9 false""" +9 test9 2000-01-09 9 true""" == instance.query(f"SELECT * FROM {TABLE_NAME} ORDER BY b").strip() ) @@ -670,7 +670,7 @@ test9 2000-01-09 9""" "test" + str(i), datetime.strptime(f"2000-01-{i}", "%Y-%m-%d"), i, - False, + False if i % 2 == 0 else True, ) ] df = spark.createDataFrame(data=data, schema=schema) @@ -696,23 +696,23 @@ test9 2000-01-09 9""" assert result == num_rows * 2 assert ( - """1 test1 2000-01-01 1 false + """1 test1 2000-01-01 1 true 2 test2 2000-01-02 2 false -3 test3 2000-01-03 3 false +3 test3 2000-01-03 3 true 4 test4 2000-01-04 4 false -5 test5 2000-01-05 5 false +5 test5 2000-01-05 5 true 6 test6 2000-01-06 6 false -7 test7 2000-01-07 7 false +7 test7 2000-01-07 7 true 8 test8 2000-01-08 8 false -9 test9 2000-01-09 9 false +9 test9 2000-01-09 9 true 10 test10 2000-01-10 10 false -11 test11 2000-01-11 11 false +11 test11 2000-01-11 11 true 12 test12 2000-01-12 12 false -13 test13 2000-01-13 13 false +13 test13 2000-01-13 13 true 14 test14 2000-01-14 14 false -15 test15 2000-01-15 15 false +15 test15 2000-01-15 15 true 16 test16 2000-01-16 16 false -17 test17 2000-01-17 17 false +17 test17 2000-01-17 17 true 18 test18 2000-01-18 18 false""" == instance.query( f""" diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 5948954ff5f..c724c5bb498 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -445,7 +445,7 @@ def test_mysql_distributed(started_cluster): query = "SELECT * FROM (" for i in range(3): query += "SELECT name FROM test_replicas UNION DISTINCT " - query += "SELECT name FROM test_replicas)" + query += "SELECT name FROM test_replicas) ORDER BY name" result = node2.query(query) assert result == "host2\nhost3\nhost4\n" @@ -827,6 +827,9 @@ def test_settings(started_cluster): f"with settings: connect_timeout={connect_timeout}, read_write_timeout={rw_timeout}" ) + node1.query("DROP DATABASE IF EXISTS m") + node1.query("DROP DATABASE IF EXISTS mm") + rw_timeout = 40123001 connect_timeout = 40123002 node1.query( @@ -855,6 +858,9 @@ def test_settings(started_cluster): f"with settings: connect_timeout={connect_timeout}, read_write_timeout={rw_timeout}" ) + node1.query("DROP DATABASE m") + node1.query("DROP DATABASE mm") + 
drop_mysql_table(conn, table_name) conn.close() @@ -930,6 +936,9 @@ def test_joins(started_cluster): conn.commit() + node1.query("DROP TABLE IF EXISTS test_joins_table_users") + node1.query("DROP TABLE IF EXISTS test_joins_table_tickets") + node1.query( """ CREATE TABLE test_joins_table_users @@ -964,6 +973,9 @@ def test_joins(started_cluster): """ ) == "281607\tFeedback\t2024-06-25 12:09:41\tuser@example.com\n" + node1.query("DROP TABLE test_joins_table_users") + node1.query("DROP TABLE test_joins_table_tickets") + if __name__ == "__main__": with contextmanager(started_cluster)() as cluster: diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 8f197e09e61..9e3ee19179a 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1,6 +1,7 @@ import io import logging import random +import string import time import pytest @@ -13,7 +14,6 @@ from uuid import uuid4 AVAILABLE_MODES = ["unordered", "ordered"] DEFAULT_AUTH = ["'minio'", "'minio123'"] NO_AUTH = ["NOSIGN"] -AZURE_CONTAINER_NAME = "cont" def prepare_public_s3_bucket(started_cluster): @@ -68,13 +68,24 @@ def s3_queue_setup_teardown(started_cluster): instance = started_cluster.instances["instance"] instance_2 = started_cluster.instances["instance2"] - instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") - instance_2.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") + instance.query("DROP DATABASE IF EXISTS default; CREATE DATABASE default;") + instance_2.query("DROP DATABASE IF EXISTS default; CREATE DATABASE default;") minio = started_cluster.minio_client objects = list(minio.list_objects(started_cluster.minio_bucket, recursive=True)) for obj in objects: minio.remove_object(started_cluster.minio_bucket, obj.object_name) + + container_client = started_cluster.blob_service_client.get_container_client( + started_cluster.azurite_container + ) + + if container_client.exists(): + blob_names = [b.name for b in container_client.list_blobs()] + logging.debug(f"Deleting blobs: {blob_names}") + for b in blob_names: + container_client.delete_blob(b) + yield # run test @@ -129,11 +140,6 @@ def started_cluster(): cluster.start() logging.info("Cluster started") - container_client = cluster.blob_service_client.get_container_client( - AZURE_CONTAINER_NAME - ) - container_client.create_container() - yield cluster finally: cluster.shutdown() @@ -190,7 +196,7 @@ def put_s3_file_content(started_cluster, filename, data, bucket=None): def put_azure_file_content(started_cluster, filename, data, bucket=None): client = started_cluster.blob_service_client.get_blob_client( - AZURE_CONTAINER_NAME, filename + started_cluster.azurite_container, filename ) buf = io.BytesIO(data) client.upload_blob(buf, "BlockBlob", len(data)) @@ -226,7 +232,7 @@ def create_table( url = f"http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{files_path}/" engine_def = f"{engine_name}('{url}', {auth_params}, {file_format})" else: - engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', 'cont', '{files_path}/', 'CSV')" + engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', '{started_cluster.azurite_container}', '{files_path}/', 'CSV')" node.query(f"DROP TABLE IF EXISTS {table_name}") create_query = f""" @@ -262,15 +268,21 @@ def create_mv( ) +def generate_random_string(length=6): + return "".join(random.choice(string.ascii_lowercase) for i in 
range(length)) + + @pytest.mark.parametrize("mode", ["unordered", "ordered"]) @pytest.mark.parametrize("engine_name", ["S3Queue", "AzureQueue"]) def test_delete_after_processing(started_cluster, mode, engine_name): node = started_cluster.instances["instance"] - table_name = f"test.delete_after_processing_{mode}_{engine_name}" + table_name = f"delete_after_processing_{mode}_{engine_name}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" files_num = 5 row_num = 10 + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" if engine_name == "S3Queue": storage = "s3" else: @@ -285,7 +297,7 @@ def test_delete_after_processing(started_cluster, mode, engine_name): table_name, mode, files_path, - additional_settings={"after_processing": "delete"}, + additional_settings={"after_processing": "delete", "keeper_path": keeper_path}, engine_name=engine_name, ) create_mv(node, table_name, dst_table_name) @@ -313,7 +325,7 @@ def test_delete_after_processing(started_cluster, mode, engine_name): assert len(objects) == 0 else: client = started_cluster.blob_service_client.get_container_client( - AZURE_CONTAINER_NAME + started_cluster.azurite_container ) objects_iterator = client.list_blobs(files_path) for objects in objects_iterator: @@ -324,11 +336,12 @@ def test_delete_after_processing(started_cluster, mode, engine_name): @pytest.mark.parametrize("engine_name", ["S3Queue", "AzureQueue"]) def test_failed_retry(started_cluster, mode, engine_name): node = started_cluster.instances["instance"] - table_name = f"test.failed_retry_{mode}_{engine_name}" + table_name = f"failed_retry_{mode}_{engine_name}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" file_path = f"{files_path}/trash_test.csv" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" retries_num = 3 values = [ @@ -385,8 +398,9 @@ def test_failed_retry(started_cluster, mode, engine_name): @pytest.mark.parametrize("mode", AVAILABLE_MODES) def test_direct_select_file(started_cluster, mode): node = started_cluster.instances["instance"] - table_name = f"test.direct_select_file_{mode}" - keeper_path = f"/clickhouse/test_{table_name}" + table_name = f"direct_select_file_{mode}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{mode}_{generate_random_string()}" files_path = f"{table_name}_data" file_path = f"{files_path}/test.csv" @@ -447,7 +461,7 @@ def test_direct_select_file(started_cluster, mode): ] == [] # New table with different zookeeper path - keeper_path = f"/clickhouse/test_{table_name}_{mode}_2" + keeper_path = f"{keeper_path}_2" create_table( started_cluster, node, @@ -491,8 +505,17 @@ def test_direct_select_multiple_files(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"direct_select_multiple_files_{mode}" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" - create_table(started_cluster, node, table_name, mode, files_path) + create_table( + started_cluster, + node, + table_name, + mode, + files_path, + additional_settings={"keeper_path": keeper_path}, + ) for i in range(5): rand_values = [[random.randint(0, 50) for _ in range(3)] for _ in range(10)] values_csv = ( @@ -515,14 +538,23 @@ def 
test_direct_select_multiple_files(started_cluster, mode): @pytest.mark.parametrize("mode", AVAILABLE_MODES) -def test_streaming_to_view_(started_cluster, mode): +def test_streaming_to_view(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"streaming_to_view_{mode}" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" total_values = generate_random_files(started_cluster, files_path, 10) - create_table(started_cluster, node, table_name, mode, files_path) + create_table( + started_cluster, + node, + table_name, + mode, + files_path, + additional_settings={"keeper_path": keeper_path}, + ) create_mv(node, table_name, dst_table_name) expected_values = set([tuple(i) for i in total_values]) @@ -544,7 +576,8 @@ def test_streaming_to_many_views(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"streaming_to_many_views_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" for i in range(3): @@ -582,7 +615,8 @@ def test_streaming_to_many_views(started_cluster, mode): def test_multiple_tables_meta_mismatch(started_cluster): node = started_cluster.instances["instance"] table_name = f"multiple_tables_meta_mismatch" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" create_table( @@ -675,7 +709,8 @@ def test_multiple_tables_streaming_sync(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"multiple_tables_streaming_sync_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 @@ -756,7 +791,10 @@ def test_multiple_tables_streaming_sync(started_cluster, mode): def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): node = started_cluster.instances["instance"] node_2 = started_cluster.instances["instance2"] - table_name = f"multiple_tables_streaming_sync_distributed_{mode}" + # A unique table name is necessary for repeatable tests + table_name = ( + f"multiple_tables_streaming_sync_distributed_{mode}_{generate_random_string()}" + ) dst_table_name = f"{table_name}_dst" keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" @@ -833,7 +871,8 @@ def test_max_set_age(started_cluster): node = started_cluster.instances["instance"] table_name = "max_set_age" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" max_age = 20 files_to_generate = 10 @@ -944,10 +983,9 @@ def test_max_set_age(started_cluster): def test_max_set_size(started_cluster): node = started_cluster.instances["instance"] table_name = f"max_set_size" - dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + 
keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" - max_age = 10 files_to_generate = 10 create_table( @@ -991,7 +1029,8 @@ def test_drop_table(started_cluster): node = started_cluster.instances["instance"] table_name = f"test_drop" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 @@ -1021,9 +1060,11 @@ def test_drop_table(started_cluster): def test_s3_client_reused(started_cluster): node = started_cluster.instances["instance"] - table_name = f"test.test_s3_client_reused" + table_name = f"test_s3_client_reused" dst_table_name = f"{table_name}_dst" files_path = f"{table_name}_data" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" row_num = 10 def get_created_s3_clients_count(): @@ -1057,6 +1098,7 @@ def test_s3_client_reused(started_cluster): additional_settings={ "after_processing": "delete", "s3queue_processing_threads_num": 1, + "keeper_path": keeper_path, }, auth=NO_AUTH, bucket=started_cluster.minio_public_bucket, @@ -1114,7 +1156,8 @@ def test_processing_threads(started_cluster, mode): node = started_cluster.instances["instance"] table_name = f"processing_threads_{mode}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 processing_threads = 32 @@ -1181,7 +1224,8 @@ def test_shards(started_cluster, mode, processing_threads): node = started_cluster.instances["instance"] table_name = f"test_shards_{mode}_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 shards_num = 3 @@ -1300,7 +1344,7 @@ where zookeeper_path ilike '%{table_name}%' and status = 'Processed' and rows_pr pytest.param("unordered", 1), pytest.param("unordered", 8), pytest.param("ordered", 1), - pytest.param("ordered", 8), + pytest.param("ordered", 2), ], ) def test_shards_distributed(started_cluster, mode, processing_threads): @@ -1308,10 +1352,11 @@ def test_shards_distributed(started_cluster, mode, processing_threads): node_2 = started_cluster.instances["instance2"] table_name = f"test_shards_distributed_{mode}_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 300 - row_num = 50 + row_num = 300 total_rows = row_num * files_to_generate shards_num = 2 @@ -1461,8 +1506,8 @@ def test_settings_check(started_cluster): node = started_cluster.instances["instance"] node_2 = started_cluster.instances["instance2"] table_name = f"test_settings_check" - dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = 
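
The keeper_path edits in these hunks all apply the same rule: each run gets a fresh ZooKeeper node, so metadata left behind by a previous or failed run cannot affect the next one. A small sketch of the idea; generate_random_string() is assumed to behave like the helper these tests already use:

    import random
    import string

    def generate_random_string(length=6):
        # Stand-in for the helper imported by the tests.
        return "".join(random.choices(string.ascii_lowercase + string.digits, k=length))

    def make_keeper_path(table_name):
        # Unique per invocation, so S3Queue/AzureQueue metadata never collides
        # between repeated runs of the same test.
        return f"/clickhouse/test_{table_name}_{generate_random_string()}"

    # Passed to create_table via additional_settings={"keeper_path": make_keeper_path(name)}.
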
f"{table_name}_data" mode = "ordered" @@ -1504,7 +1549,10 @@ def test_processed_file_setting(started_cluster, processing_threads): node = started_cluster.instances["instance"] table_name = f"test_processed_file_setting_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}_{processing_threads}" + # A unique path is necessary for repeatable tests + keeper_path = ( + f"/clickhouse/test_{table_name}_{processing_threads}_{generate_random_string()}" + ) files_path = f"{table_name}_data" files_to_generate = 10 @@ -1555,7 +1603,10 @@ def test_processed_file_setting_distributed(started_cluster, processing_threads) node_2 = started_cluster.instances["instance2"] table_name = f"test_processed_file_setting_distributed_{processing_threads}" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = ( + f"/clickhouse/test_{table_name}_{processing_threads}_{generate_random_string()}" + ) files_path = f"{table_name}_data" files_to_generate = 10 @@ -1609,7 +1660,8 @@ def test_upgrade(started_cluster): table_name = f"test_upgrade" dst_table_name = f"{table_name}_dst" - keeper_path = f"/clickhouse/test_{table_name}" + # A unique path is necessary for repeatable tests + keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" files_to_generate = 10 @@ -1648,7 +1700,8 @@ def test_upgrade(started_cluster): def test_exception_during_insert(started_cluster): node = started_cluster.instances["instance_too_many_parts"] - table_name = f"test_exception_during_insert" + # A unique table name is necessary for repeatable tests + table_name = f"test_exception_during_insert_{generate_random_string()}" dst_table_name = f"{table_name}_dst" keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" @@ -1664,6 +1717,7 @@ def test_exception_during_insert(started_cluster): "keeper_path": keeper_path, }, ) + node.rotate_logs() total_values = generate_random_files( started_cluster, files_path, files_to_generate, start_ind=0, row_num=1 ) @@ -1680,33 +1734,49 @@ def test_exception_during_insert(started_cluster): ) assert "Too many parts" in exception + original_parts_to_throw_insert = 0 + modified_parts_to_throw_insert = 10 node.replace_in_config( "/etc/clickhouse-server/config.d/merge_tree.xml", - "parts_to_throw_insert>0", - "parts_to_throw_insert>10", + f"parts_to_throw_insert>{original_parts_to_throw_insert}", + f"parts_to_throw_insert>{modified_parts_to_throw_insert}", ) - node.restart_clickhouse() + try: + node.restart_clickhouse() - def get_count(): - return int(node.query(f"SELECT count() FROM {dst_table_name}")) + def get_count(): + return int(node.query(f"SELECT count() FROM {dst_table_name}")) - expected_rows = 10 - for _ in range(20): - if expected_rows == get_count(): - break - time.sleep(1) - assert expected_rows == get_count() + expected_rows = 10 + for _ in range(20): + if expected_rows == get_count(): + break + time.sleep(1) + assert expected_rows == get_count() + finally: + node.replace_in_config( + "/etc/clickhouse-server/config.d/merge_tree.xml", + f"parts_to_throw_insert>{modified_parts_to_throw_insert}", + f"parts_to_throw_insert>{original_parts_to_throw_insert}", + ) + node.restart_clickhouse() def test_commit_on_limit(started_cluster): node = started_cluster.instances["instance"] - table_name = f"test_commit_on_limit" + # A unique table name is necessary for repeatable tests + table_name = 
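
test_exception_during_insert above now wraps the temporary merge-tree limit change in try/finally so the original configuration is restored even when the check fails. The shape of that pattern, with the node methods assumed to be the usual integration-test helpers:

    ORIGINAL_LIMIT = 0   # parts_to_throw_insert value shipped with the test config
    RELAXED_LIMIT = 10   # temporary value that lets the blocked inserts complete

    def with_relaxed_parts_limit(node, check):
        config = "/etc/clickhouse-server/config.d/merge_tree.xml"
        node.replace_in_config(
            config,
            f"parts_to_throw_insert>{ORIGINAL_LIMIT}",
            f"parts_to_throw_insert>{RELAXED_LIMIT}",
        )
        try:
            node.restart_clickhouse()
            check()  # e.g. poll until the expected row count is reached
        finally:
            # Put the limit back regardless of the outcome, so later tests
            # on the same node start from the stock configuration.
            node.replace_in_config(
                config,
                f"parts_to_throw_insert>{RELAXED_LIMIT}",
                f"parts_to_throw_insert>{ORIGINAL_LIMIT}",
            )
            node.restart_clickhouse()
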
f"test_commit_on_limit_{generate_random_string()}" dst_table_name = f"{table_name}_dst" keeper_path = f"/clickhouse/test_{table_name}" files_path = f"{table_name}_data" files_to_generate = 10 + failed_files_event_before = int( + node.query( + "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" + ) + ) create_table( started_cluster, node, @@ -1782,6 +1852,9 @@ def test_commit_on_limit(started_cluster): assert "test_999999.csv" in get_processed_files() assert 1 == int( + node.count_in_log(f"Setting file {files_path}/test_9999.csv as failed") + ) + assert failed_files_event_before + 1 == int( node.query( "SELECT value FROM system.events WHERE name = 'ObjectStorageQueueFailedFiles' SETTINGS system_events_show_zero_values=1" ) diff --git a/tests/integration/test_throttling/test.py b/tests/integration/test_throttling/test.py index c53c2bb1ddf..4bd96e2756d 100644 --- a/tests/integration/test_throttling/test.py +++ b/tests/integration/test_throttling/test.py @@ -121,21 +121,15 @@ def node_update_config(mode, setting, value=None): node.restart_clickhouse() -def assert_took(took, should_took): +def assert_took(took, should_take): # we need to decrease the lower limit because the server limits could # be enforced by throttling some server background IO instead of query IO # and we have no control over it - # - # and the same for upper limit, it can be slightly larger, due to for - # instance network latencies or CPU starvation - if should_took > 0: - assert took >= should_took * 0.85 and took <= should_took * 1.8 - else: - assert took >= should_took * 0.85 + assert took >= should_take * 0.85 @pytest.mark.parametrize( - "policy,backup_name,mode,setting,value,should_took", + "policy,backup_name,mode,setting,value,should_take", [ # # Local -> Local @@ -149,7 +143,7 @@ def assert_took(took, should_took): 0, id="no_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("local"), @@ -159,7 +153,7 @@ def assert_took(took, should_took): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("local"), @@ -181,7 +175,7 @@ def assert_took(took, should_took): 0, id="no_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", next_backup_name("local"), @@ -191,7 +185,7 @@ def assert_took(took, should_took): 7, id="user_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", next_backup_name("local"), @@ -252,7 +246,7 @@ def assert_took(took, should_took): 0, id="no_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("remote"), @@ -262,7 +256,7 @@ def assert_took(took, should_took): 7, id="user_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 
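
test_commit_on_limit now also records the ObjectStorageQueueFailedFiles counter before creating the table and asserts only on the delta afterwards, together with a log check, so the assertion still holds if earlier tests already incremented the server-wide counter. Roughly:

    EVENT_QUERY = (
        "SELECT value FROM system.events "
        "WHERE name = 'ObjectStorageQueueFailedFiles' "
        "SETTINGS system_events_show_zero_values=1"
    )

    def failed_files(node):
        # system.events is server-wide, so always compare against a baseline.
        return int(node.query(EVENT_QUERY))

    # before = failed_files(node)
    # ... run the scenario that is expected to fail exactly one file ...
    # assert failed_files(node) == before + 1
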
bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("remote"), @@ -274,7 +268,7 @@ def assert_took(took, should_took): ), ], ) -def test_backup_throttling(policy, backup_name, mode, setting, value, should_took): +def test_backup_throttling(policy, backup_name, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -284,7 +278,7 @@ def test_backup_throttling(policy, backup_name, mode, setting, value, should_too """ ) _, took = elapsed(node.query, f"backup table data to {backup_name}") - assert_took(took, should_took) + assert_took(took, should_take) def test_backup_throttling_override(): @@ -305,18 +299,18 @@ def test_backup_throttling_override(): "max_backup_bandwidth": "500K", }, ) - # reading 1e6*8 bytes with 500Ki default bandwith should take (8-0.5)/0.5=15 seconds + # reading 1e6*8 bytes with 500Ki default bandwidth should take (8-0.5)/0.5=15 seconds assert_took(took, 15) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -325,7 +319,7 @@ def test_backup_throttling_override(): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -338,7 +332,7 @@ def test_backup_throttling_override(): # Remote # pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -347,7 +341,7 @@ def test_backup_throttling_override(): 7, id="user_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -358,7 +352,7 @@ def test_backup_throttling_override(): ), ], ) -def test_read_throttling(policy, mode, setting, value, should_took): +def test_read_throttling(policy, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -368,17 +362,17 @@ def test_read_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"select * from data") - assert_took(took, should_took) + assert_took(took, should_take) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -387,7 +381,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): 7, id="local_user_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -400,7 +394,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): # Remote 
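
The corrected comments above all encode one expectation: transferring 1e6 rows of 8 bytes under an N MB/s limit should take about (8 - N) / N seconds. assert_took now checks only a relaxed lower bound, since part of the limit may be spent throttling server background IO rather than the query itself, and an upper bound was never reliable under network latency or CPU starvation. In short:

    def expected_seconds(total_bytes, limit_bytes_per_sec):
        # Matches the comments in this file: (total - limit) / limit seconds.
        return (total_bytes - limit_bytes_per_sec) / limit_bytes_per_sec

    def assert_took(took, should_take):
        # Lower bound only, slightly relaxed; slower runs are always acceptable.
        assert took >= should_take * 0.85

    assert expected_seconds(8e6, 1e6) == 7.0   # 1 MB/s case
    assert expected_seconds(8e6, 2e6) == 3.0   # 2 MB/s case
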
# pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # writing 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # writing 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -409,7 +403,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): 7, id="user_remote_throttling", ), - # writing 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # writing 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -420,7 +414,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): ), ], ) -def test_write_throttling(policy, mode, setting, value, should_took): +def test_write_throttling(policy, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -429,7 +423,7 @@ def test_write_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"insert into data select * from numbers(1e6)") - assert_took(took, should_took) + assert_took(took, should_take) def test_max_mutations_bandwidth_for_server(): @@ -444,7 +438,7 @@ def test_max_mutations_bandwidth_for_server(): node.query, "alter table data update key = -key where 1 settings mutations_sync = 1", ) - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) @@ -457,5 +451,5 @@ def test_max_merges_bandwidth_for_server(): ) node.query("insert into data select * from numbers(1e6)") _, took = elapsed(node.query, "optimize table data final") - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) diff --git a/tests/performance/all_join_opt.xml b/tests/performance/all_join_opt.xml new file mode 100644 index 00000000000..0ab9c39f67c --- /dev/null +++ b/tests/performance/all_join_opt.xml @@ -0,0 +1,15 @@ + + CREATE TABLE test (a Int64, b String, c LowCardinality(String)) ENGINE = MergeTree() ORDER BY a + CREATE TABLE test1 (a Int64, b String, c LowCardinality(String)) ENGINE = MergeTree() ORDER BY a + + INSERT INTO test SELECT number % 10000, number % 10000, number % 10000 FROM numbers(10000000) + INSERT INTO test1 SELECT number % 1000 , number % 1000, number % 1000 FROM numbers(100000) + + SELECT MAX(test1.a) FROM test INNER JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test LEFT JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test RIGHT JOIN test1 on test.b = test1.b + SELECT MAX(test1.a) FROM test FULL JOIN test1 on test.b = test1.b + + DROP TABLE IF EXISTS test + DROP TABLE IF EXISTS test1 + \ No newline at end of file diff --git a/tests/performance/json_type.xml b/tests/performance/json_type.xml index b6406f52579..db3fd844f89 100644 --- a/tests/performance/json_type.xml +++ b/tests/performance/json_type.xml @@ -27,9 +27,9 @@ - CREATE TABLE t_json_1(data JSON) ENGINE = MergeTree ORDER BY tuple() - CREATE TABLE t_json_2(data JSON) ENGINE = MergeTree ORDER BY tuple() - CREATE TABLE t_json_3(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_1(data Object('json')) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_2(data Object('json')) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_3(data Object('json')) ENGINE = MergeTree ORDER BY tuple() INSERT INTO t_json_1 SELECT materialize({json1}) FROM numbers(200000) INSERT INTO t_json_2 
SELECT {json2} FROM numbers(100000) diff --git a/tests/performance/materialized_view_deduplication.xml b/tests/performance/materialized_view_deduplication.xml new file mode 100644 index 00000000000..e5e0e5fc6e4 --- /dev/null +++ b/tests/performance/materialized_view_deduplication.xml @@ -0,0 +1,33 @@ + + + 1 + + + CREATE TABLE dst (`key` Int64, `value` String) + ENGINE = MergeTree ORDER BY tuple() + SETTINGS non_replicated_deduplication_window=1000; + + + CREATE TABLE mv_dst (`key` Int64, `value` String) + ENGINE = MergeTree ORDER BY tuple() + SETTINGS non_replicated_deduplication_window=1000; + + + CREATE MATERIALIZED VIEW mv_first TO mv_dst + AS SELECT 0 AS key, value AS value FROM dst; + + + CREATE MATERIALIZED VIEW mv_second TO mv_dst + AS SELECT 0 AS key, value AS value FROM dst; + + INSERT INTO dst SELECT number as key, toString(number) from numbers(1000); + + + INSERT INTO dst SELECT number as key, toString(number) from numbers(1000); + + + DROP TABLE IF EXISTS dst + DROP TABLE IF EXISTS mv_dst + DROP TABLE IF EXISTS mv_first + DROP TABLE IF EXISTS mv_second + diff --git a/tests/performance/new_json_type.xml b/tests/performance/new_json_type.xml new file mode 100644 index 00000000000..1ad21850c6c --- /dev/null +++ b/tests/performance/new_json_type.xml @@ -0,0 +1,41 @@ + + + 1 + + + + + + + json1 + + '{"k1":1, "k2": "some"}' + + + + json2 + + '{"col' || toString(number % 100) || '":' || toString(number) || '}' + + + + json3 + + '{"k1":[{"k2":"aaa","k3":[{"k4":"bbb"},{"k4":"ccc"}]},{"k2":"ddd","k3":[{"k4":"eee"},{"k4":"fff"}]}]}' + + + + + CREATE TABLE t_json_1(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_2(data JSON) ENGINE = MergeTree ORDER BY tuple() + CREATE TABLE t_json_3(data JSON) ENGINE = MergeTree ORDER BY tuple() + + INSERT INTO t_json_1 SELECT materialize({json1}) FROM numbers(200000) + INSERT INTO t_json_2 SELECT {json2} FROM numbers(100000) + INSERT INTO t_json_3 SELECT materialize({json3}) FROM numbers_mt(100000) + + DROP TABLE IF EXISTS t_json_1 + DROP TABLE IF EXISTS t_json_2 + DROP TABLE IF EXISTS t_json_3 + diff --git a/tests/performance/parallel_mv.xml b/tests/performance/parallel_mv.xml index 5b856740a19..0bf5ed1be09 100644 --- a/tests/performance/parallel_mv.xml +++ b/tests/performance/parallel_mv.xml @@ -11,13 +11,13 @@ create table mt_4 (n UInt64, s String) engine = MergeTree order by tuple() create materialized view mv_1 to mt_1 as - select number, toString(number) from main_table where number % 13 != 0 + select number as n, toString(number) as s from main_table where number % 13 != 0 create materialized view mv_2 to mt_2 as - select number, toString(number) from main_table where number % 13 != 1 + select number as n, toString(number) as s from main_table where number % 13 != 1 create materialized view mv_3 to mt_3 as - select number, toString(number) from main_table where number % 13 != 3 + select number as n, toString(number) as s from main_table where number % 13 != 3 create materialized view mv_4 to mt_4 as - select number, toString(number) from main_table where number % 13 != 4 + select number as n, toString(number) as s from main_table where number % 13 != 4 SYSTEM STOP MERGES main_table SYSTEM STOP MERGES mt_1 diff --git a/tests/performance/parquet_read_with_index.xml b/tests/performance/parquet_read_with_index.xml new file mode 100644 index 00000000000..1bb2d8eb4a2 --- /dev/null +++ b/tests/performance/parquet_read_with_index.xml @@ -0,0 +1,30 @@ + + + INSERT INTO FUNCTION file('test_pq_index', Parquet) SELECT * FROM 
generateRandom('int64_column Nullable(Int64), tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Tuple(i UInt32, j UInt32)),array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))), map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') limit 1000000 SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=true + + + + SELECT * FROM file('test_pq_index', Parquet, 'tuple_column Tuple(a Nullable(String))') Format Null + + + + SELECT tuple_column.a FROM file('test_pq_index', Parquet) Format Null + + + + SELECT tuple_column.a FROM file('test_pq_index', Parquet, 'tuple_column Tuple(a Nullable(String))') Format Null + + + + SELECT tuple_column.c.i FROM file('test_pq_index', Parquet) Format Null + + + + SELECT * FROM file('test_pq_index', Parquet, 'array_tuple_column Array (Tuple(a Nullable(String)))') Format Null + + + + SELECT * FROM file('test_pq_index', Parquet, 'map_tuple_column Map(String, Tuple(a Nullable(String)))') Format Null + + + diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 53321afc94c..0f13217c236 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -9,6 +9,8 @@ system flush logs; drop table if exists logs; create view logs as select * from system.text_log where now() - toIntervalMinute(120) < event_time; +SET max_rows_to_read = 0; + -- Check that we don't have too many messages formatted with fmt::runtime or strings concatenation. -- 0.001 threshold should be always enough, the value was about 0.00025 WITH 0.001 AS threshold diff --git a/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql index 2f0ef648983..4b87b2af28d 100644 --- a/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql +++ b/tests/queries/0_stateless/00086_concat_nary_const_with_nonconst_segfault.sql @@ -1 +1 @@ -SELECT extract(toString(number), '10000000') FROM system.numbers_mt WHERE concat(materialize('1'), '...', toString(number)) LIKE '%10000000%' LIMIT 1 +SELECT extract(toString(number), '10000000') FROM system.numbers_mt WHERE concat(materialize('1'), '...', toString(number)) LIKE '%10000000%' LIMIT 1 SETTINGS max_rows_to_read = 0; diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference index df5aa77af60..7534c12a0d8 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference +++ b/tests/queries/0_stateless/00111_shard_external_sort_distributed.reference @@ -1,20 +1,20 @@ -7040546 -7040546 -4327029 -4327029 -1613512 -1613512 -8947307 -8947307 -6233790 -6233790 -3520273 -3520273 -806756 -806756 -8140551 -8140551 -5427034 -5427034 -2713517 -2713517 +4437158 +4437158 +1723641 +1723641 +3630402 +3630402 +916885 +916885 +2823646 +2823646 +110129 +110129 +4730407 +4730407 +2016890 +2016890 +3923651 +3923651 +1210134 +1210134 diff --git a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql index 112f5edae36..9e06654195d 100644 --- a/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql +++ 
b/tests/queries/0_stateless/00111_shard_external_sort_distributed.sql @@ -1,11 +1,12 @@ --- Tags: distributed +-- Tags: distributed, long, no-flaky-check +-- ^ no-flaky-check - sometimes longer than 600s with ThreadFuzzer. -SET max_memory_usage = 300000000; -SET max_bytes_before_external_sort = 20000000; +SET max_memory_usage = 150000000; +SET max_bytes_before_external_sort = 10000000; DROP TABLE IF EXISTS numbers10m; -CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 10000000; +CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 5000000; -SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 19999980, 20; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 4999980, 20; DROP TABLE numbers10m; diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.reference b/tests/queries/0_stateless/00170_lower_upper_utf8.reference index f202cb75513..3c644f22b9b 100644 --- a/tests/queries/0_stateless/00170_lower_upper_utf8.reference +++ b/tests/queries/0_stateless/00170_lower_upper_utf8.reference @@ -22,3 +22,7 @@ 1 1 1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8.sql b/tests/queries/0_stateless/00170_lower_upper_utf8.sql index 4caba2033ff..85b6c5c6095 100644 --- a/tests/queries/0_stateless/00170_lower_upper_utf8.sql +++ b/tests/queries/0_stateless/00170_lower_upper_utf8.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + select lower('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str; select lowerUTF8('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str; select lower('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa'; @@ -27,3 +30,11 @@ select sum(lower(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaАБВ select sum(upper(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n; select sum(lowerUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaабвгaaaaaaaa')) = count() from system.one array join range(16384) as n; select sum(upperUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n; + +-- Turkish language +select upperUTF8('ır') = 'IR'; +select lowerUTF8('ır') = 'ır'; + +-- German language +select upper('öäüß') = 'öäüß'; +select lower('ÖÄÜẞ') = 'ÖÄÜẞ'; diff --git a/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql index dd7394bc39a..d6668cb7ba4 100644 --- a/tests/queries/0_stateless/00233_position_function_family.sql +++ b/tests/queries/0_stateless/00233_position_function_family.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SET send_logs_level = 'fatal'; select 1 = position('', ''); diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index 0b2e80fe457..8546e547581 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -14,22 +14,22 @@ $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" $CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2" -$CLICKHOUSE_CLIENT -n 
--query="SELECT 1; SELECT 2;" -$CLICKHOUSE_CLIENT -n --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; S" 2>&1 | grep -o 'Syntax error' +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2;" +$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT 2; SELECT" 2>&1 | grep -o 'Syntax error' -$CLICKHOUSE_CLIENT -n --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" +$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS t_00366; CREATE TABLE t_00366 (x UInt64) ENGINE = TinyLog;" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" $CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES (1),(2),(3);" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" -$CLICKHOUSE_CLIENT -n --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES (1),(2),(3);" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" +$CLICKHOUSE_CLIENT --query="INSERT INTO t_00366 VALUES" <<< "(4),(5),(6)" +$CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d "SELECT 1;" @@ -48,4 +48,4 @@ $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=INSERT+INTO+t_00366+VALUES" -d "(7),(8),(9)" $CLICKHOUSE_CLIENT --query="SELECT * FROM t_00366" -$CLICKHOUSE_CLIENT -n --query="DROP TABLE t_00366;" +$CLICKHOUSE_CLIENT --query="DROP TABLE t_00366;" diff --git a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql index 8a310cb8fc9..c8a243d9b27 100644 --- a/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql +++ b/tests/queries/0_stateless/00375_shard_group_uniq_array_of_string.sql @@ -1,4 +1,4 @@ --- Tags: shard +-- Tags: shard, long DROP TABLE IF EXISTS group_uniq_str; CREATE TABLE group_uniq_str ENGINE = Memory AS SELECT number % 10 as id, toString(intDiv((number%10000), 10)) as v FROM system.numbers LIMIT 10000000; @@ -7,7 +7,7 @@ INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM syst INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000; SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id; -SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '100M'; SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; diff --git a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql index abd0e6e6a45..4453c26283c 100644 --- a/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql +++ b/tests/queries/0_stateless/00376_shard_group_uniq_array_of_int_array.sql @@ -1,4 +1,6 @@ --- Tags: shard +-- Tags: long + +SET max_rows_to_read = '55M'; DROP 
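
Several of the stateless-test hunks above add explicit max_rows_to_read overrides: 0 removes the row-read limit for the scans over system.numbers, while a value like '55M' gives the heavy groupUniqArray queries a known budget instead of whatever the runner's default happens to be. Expressed in the integration-test style (hypothetical helper, for illustration only):

    def run_unbounded_scan(node):
        # max_rows_to_read = 0 disables the row-read limit for this query only.
        return node.query(
            "SELECT count() FROM system.numbers LIMIT 10 "
            "SETTINGS max_rows_to_read = 0"
        )

    # The .sql tests instead use SET max_rows_to_read = '55M' at script scope,
    # which caps every following statement in the same session.
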
TABLE IF EXISTS group_uniq_arr_int; CREATE TABLE group_uniq_arr_int ENGINE = Memory AS diff --git a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql index e9cfff211f8..1ec91ac2396 100644 --- a/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql +++ b/tests/queries/0_stateless/00377_shard_group_uniq_array_of_string_array.sql @@ -1,4 +1,5 @@ --- Tags: shard +-- Tags: shard, long +SET max_rows_to_read = '55M'; DROP TABLE IF EXISTS group_uniq_arr_str; CREATE TABLE group_uniq_arr_str ENGINE = Memory AS diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index 27b9f5c00c7..0635fbc2a57 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -43,10 +43,10 @@ popd > /dev/null #SCRIPTDIR=`dirname "$SCRIPTPATH"` SCRIPTDIR=$SCRIPTPATH -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED -cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout +cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED rm "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh index 6ee1649c9ed..86902fca4aa 100755 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh @@ -74,7 +74,7 @@ ${CLICKHOUSE_CLIENT} --query "DROP TABLE t" echo "A session cannot be used by concurrent connections:" -${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT count() FROM system.numbers" >/dev/null & +${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9&query_id=${CLICKHOUSE_DATABASE}_9&max_rows_to_read=0" --data-binary "SELECT count() FROM system.numbers" >/dev/null & # An infinite loop is required to make the test reliable. We will ensure that at least once the query on the line above has started before this check while true diff --git a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh index 560b97a1d1b..5550fa69d3d 100755 --- a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh +++ b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_readonly; CREATE TABLE test_readonly ( ID Int @@ -16,7 +16,7 @@ $CLICKHOUSE_CLIENT -n --query=" ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; CREATE TEMPORARY TABLE readonly ( ID Int @@ -26,7 +26,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -34,7 +34,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 1; DROP TABLE test_readonly; " 2> /dev/null; @@ -46,7 +46,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; CREATE TEMPORARY TABLE readonly ( ID Int @@ -58,7 +58,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -66,7 +66,7 @@ CODE=$?; [ "$CODE" -ne "164" ] && [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 2; DROP TABLE test_readonly; " 2> /dev/null; @@ -78,7 +78,7 @@ CODE=$?; ################ # Try to create temporary table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; CREATE TEMPORARY TABLE readonly ( ID Int @@ -90,7 +90,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to insert into exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; INSERT INTO test_readonly (ID) VALUES (1); " 2> /dev/null; @@ -98,7 +98,7 @@ CODE=$?; [ "$CODE" -ne "0" ] && echo "Fail" && exit $CODE; # Try to drop exists (non temporary) table -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SET readonly = 0; DROP TABLE test_readonly; " 2> /dev/null; diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index 7a71d17f19b..7d4125eea69 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -6,41 +6,56 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -TEST_PREFIX=$RANDOM +TEST_PREFIX="${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "drop user if exists u_00600${TEST_PREFIX}" -${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1" +${CLICKHOUSE_CLIENT} -q "create user u_00600${TEST_PREFIX} settings max_execution_time=60, readonly=1, max_rows_to_read=0" ${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600${TEST_PREFIX}" function wait_for_query_to_start() { - while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done + while [[ 0 -eq $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") ]] + do + sleep 0.1 + done +} + +function wait_for_queries_to_finish() +{ + while [[ 0 -ne $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE current_database = '${CLICKHOUSE_DATABASE}' AND query NOT LIKE '%this query%'") ]] + do + sleep 0.1 + done } -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & -wait_for_query_to_start 'hello' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1&max_rows_to_read=0" -d 'SELECT 1, count() FROM system.numbers' > /dev/null 2>&1 & +wait_for_query_to_start "${CLICKHOUSE_DATABASE}hello" # Replace it -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=hello&replace_running_query=1" -d 'SELECT 0' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}hello&replace_running_query=1" -d 'SELECT 0' # Wait for it to be replaced wait +wait_for_queries_to_finish -${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id=42 --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & -wait_for_query_to_start '42' +${CLICKHOUSE_CLIENT_BINARY} --user=u_00600${TEST_PREFIX} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 2, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & +wait_for_query_to_start "${CLICKHOUSE_DATABASE}42" # Trying to run another query with the same query_id -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 43' 2>&1 | grep -cF 'is already running by user' +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --query='SELECT 43' 2>&1 | grep -cF 'is already running by user' # Trying to replace query of a different user -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=42&replace_running_query=1" -d 'SELECT 1' | grep -cF 'is already running by user' +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL&query_id=${CLICKHOUSE_DATABASE}42&replace_running_query=1" -d 'SELECT 1' | grep -cF 'is already running by user' -$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '42' SYNC" > /dev/null +$CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}42' SYNC" > /dev/null wait +wait_for_queries_to_finish -${CLICKHOUSE_CLIENT} --query_id=42 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'was cancelled' & -wait_for_query_to_start '42' -${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --max_rows_to_read=0 --query='SELECT 3, count() FROM system.numbers' 2>&1 | grep -cF 'QUERY_WAS_CANCELLED' & +wait_for_query_to_start 
"${CLICKHOUSE_DATABASE}42" +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --replace_running_query=1 --replace_running_query_max_wait_ms=500 --query='SELECT 43' 2>&1 | grep -F "can't be stopped" > /dev/null wait -${CLICKHOUSE_CLIENT} --query_id=42 --replace_running_query=1 --query='SELECT 44' +wait_for_queries_to_finish + +${CLICKHOUSE_CLIENT} --query_id="${CLICKHOUSE_DATABASE}42" --replace_running_query=1 --query='SELECT 44' ${CLICKHOUSE_CLIENT} -q "drop user u_00600${TEST_PREFIX}" diff --git a/tests/queries/0_stateless/00601_kill_running_query.reference b/tests/queries/0_stateless/00601_kill_running_query.reference index 3917ff89482..7824d5804bc 100644 --- a/tests/queries/0_stateless/00601_kill_running_query.reference +++ b/tests/queries/0_stateless/00601_kill_running_query.reference @@ -1 +1 @@ -waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) +waiting test_00601_default default SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0 diff --git a/tests/queries/0_stateless/00601_kill_running_query.sh b/tests/queries/0_stateless/00601_kill_running_query.sh index 3163f8146d0..be0fff49129 100755 --- a/tests/queries/0_stateless/00601_kill_running_query.sh +++ b/tests/queries/0_stateless/00601_kill_running_query.sh @@ -11,7 +11,7 @@ function wait_for_query_to_start() while [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() FROM system.processes WHERE query_id = '$1'") == 0 ]]; do sleep 0.1; done } -${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k)' > /dev/null & +${CLICKHOUSE_CURL_COMMAND} -q --max-time 30 -sS "$CLICKHOUSE_URL&query_id=test_00601_$CLICKHOUSE_DATABASE" -d 'SELECT sum(ignore(*)) FROM (SELECT number % 1000 AS k, groupArray(number) FROM numbers(50000000) GROUP BY k) SETTINGS max_rows_to_read = 0' > /dev/null & wait_for_query_to_start "test_00601_$CLICKHOUSE_DATABASE" $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "KILL QUERY WHERE query_id = 'test_00601_$CLICKHOUSE_DATABASE'" wait diff --git a/tests/queries/0_stateless/00632_get_sample_block_cache.sql b/tests/queries/0_stateless/00632_get_sample_block_cache.sql index ae9b6bb7b2c..a631cbb8b86 100644 --- a/tests/queries/0_stateless/00632_get_sample_block_cache.sql +++ b/tests/queries/0_stateless/00632_get_sample_block_cache.sql @@ -2,6 +2,9 @@ SET joined_subquery_requires_alias = 0; +-- We are no longer interested in the old analyzer. 
+SET allow_experimental_analyzer = 1; + -- This test (SELECT) without cache can take tens minutes DROP TABLE IF EXISTS dict_string; DROP TABLE IF EXISTS dict_ui64; @@ -41,8 +44,6 @@ SETTINGS index_granularity = 8192; CREATE TABLE dict_string (entityIri String) ENGINE = Memory; CREATE TABLE dict_ui64 (learnerId UInt64) ENGINE = Memory; ---SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average`, if (isNaN((`overall-full-watched-learners-count`/`overall-watchers-count`) * 100), 0, (`overall-full-watched-learners-count`/`overall-watchers-count`) * 100) as `overall-watched-part`, if (isNaN((`full-watched-learners-count`/`watchers-count` * 100)), 0, (`full-watched-learners-count`/`watchers-count` * 100)) as `full-watched-part`, if (isNaN((`rejects-count`/`views-count` * 100)), 0, (`rejects-count`/`views-count` * 100)) as `rejects-part` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, 
`views-count-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average` FROM (SELECT `entityIri`, `watchers-count` FROM (SELECT `entityIri` FROM `CloM8CwMR2`) ANY LEFT JOIN (SELECT uniq(learnerId) as `watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewDurationSum) as `time-repeating-average`, `entityIri` FROM (SELECT sum(views.viewDuration) as viewDurationSum, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `views`.`repeatingView` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `reject-views-duration-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewsCount) as `repeating-views-count-average`, `entityIri` FROM (SELECT count() as viewsCount, `learnerId`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `courseId` = 1 AND `entityIri` IN `CloM8CwMR2` WHERE `views`.`repeatingView` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `views-duration-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.watchedPart) as `watched-part-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `rejects-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(progressMax) as `progress-average`, `entityIri` FROM (SELECT max(views.progress) as progressMax, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` 
AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedViews) as `views-count-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT any(duration) as `duration`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `full-watched-learners-count`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-full-watched-learners-count`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `views-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedTime) as `time-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN `CloM8CwMR2` AND `courseId` = 1 WHERE `learnerId` IN `tkRpHxGqM1` GROUP BY `entityIri`) USING `entityIri`) FORMAT JSON; - SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average`, if (isNaN((`overall-full-watched-learners-count`/`overall-watchers-count`) * 100), 0, (`overall-full-watched-learners-count`/`overall-watchers-count`) * 100) as `overall-watched-part`, if (isNaN((`full-watched-learners-count`/`watchers-count` * 100)), 0, (`full-watched-learners-count`/`watchers-count` * 100)) as `full-watched-part`, if (isNaN((`rejects-count`/`views-count` * 100)), 0, (`rejects-count`/`views-count` * 100)) as `rejects-part` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, 
`views-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average` FROM (SELECT `entityIri`, `watchers-count` FROM (SELECT `entityIri` FROM dict_string) ANY LEFT JOIN (SELECT uniq(learnerId) as `watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewDurationSum) as `time-repeating-average`, `entityIri` FROM (SELECT sum(views.viewDuration) as viewDurationSum, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `views`.`repeatingView` = 1 AND `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `reject-views-duration-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string 
AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewsCount) as `repeating-views-count-average`, `entityIri` FROM (SELECT count() as viewsCount, `learnerId`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `courseId` = 1 AND `entityIri` IN dict_string WHERE `views`.`repeatingView` = 1 AND `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `views-duration-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.watchedPart) as `watched-part-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `rejects-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(progressMax) as `progress-average`, `entityIri` FROM (SELECT max(views.progress) as progressMax, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedViews) as `views-count-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT any(duration) as `duration`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `full-watched-learners-count`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-full-watched-learners-count`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `views-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedTime) as `time-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`); @@ -55,8 +56,8 @@ DROP TABLE video_views; --- Test for tsan: Ensure cache used from one thread -SET max_threads = 32; +-- Test for tsan: Ensure cache is used from one thread +SET max_threads = 32, 
max_memory_usage = '10G'; DROP TABLE IF EXISTS sample_00632; @@ -173,7 +174,6 @@ FROM UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) ) GROUP BY x - --HAVING c = 1 ORDER BY x ASC ); DROP TABLE sample_00632; diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index 93fd0c4a977..e9a4369a5bf 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -19,13 +19,13 @@ settings="$server_logs --log_queries=1 --log_query_threads=1 --log_profile_event # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " DROP TABLE IF EXISTS null_00634; CREATE TABLE null_00634 (i UInt8) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple();" head -c 1000 /dev/zero | $CLICKHOUSE_CLIENT $settings --max_insert_block_size=10 --min_insert_block_size_rows=1 --min_insert_block_size_bytes=1 -q "INSERT INTO null_00634 FORMAT RowBinary" -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " SELECT count() FROM null_00634; DROP TABLE null_00634;" diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index 96d5764780f..d69e14bdbb9 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/mergetree_mutations.lib -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS fetches_r1 SYNC; DROP TABLE IF EXISTS fetches_r2 SYNC" @@ -17,7 +17,7 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE fetches_r2(x UInt32) ENGINE Replicate SETTINGS prefer_fetch_merged_part_time_threshold=0, \ prefer_fetch_merged_part_size_threshold=0" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET insert_keeper_fault_injection_probability=0; INSERT INTO fetches_r1 VALUES (1); INSERT INTO fetches_r1 VALUES (2); @@ -51,6 +51,6 @@ ${CLICKHOUSE_CLIENT} --query="SYSTEM SYNC REPLICA fetches_r2" ${CLICKHOUSE_CLIENT} --query="SELECT '*** Check data after fetch/clone of mutated part ***'" ${CLICKHOUSE_CLIENT} --query="SELECT _part, * FROM fetches_r2 ORDER BY x" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE fetches_r1 SYNC; DROP TABLE fetches_r2 SYNC" diff --git a/tests/queries/0_stateless/00727_concat.reference b/tests/queries/0_stateless/00727_concat.reference index 329ad36ad3c..a93bf12b77a 100644 --- a/tests/queries/0_stateless/00727_concat.reference +++ b/tests/queries/0_stateless/00727_concat.reference @@ -34,6 +34,7 @@ With 2023-11-14 05:50:12.123 With hallo With [\'foo\',\'bar\'] With {"foo":"bar"} +With {"foo":"bar"} With (42,\'foo\') With {42:\'foo\'} With 122.233.64.201 diff --git a/tests/queries/0_stateless/00727_concat.sql b/tests/queries/0_stateless/00727_concat.sql index 76dae541261..65cd019cc13 100644 --- a/tests/queries/0_stateless/00727_concat.sql +++ b/tests/queries/0_stateless/00727_concat.sql @@ -2,6 +2,7 @@ -- no-fasttest: json type needs rapidjson library, geo types need s2 geometry SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SET allow_suspicious_low_cardinality_types=1; SELECT '-- Const string + non-const 
arbitrary type'; @@ -40,6 +41,7 @@ SELECT concat('With ', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'E SELECT concat('With ', materialize('hallo' :: Enum('hallo' = 1))); SELECT concat('With ', materialize(['foo', 'bar'] :: Array(String))); SELECT concat('With ', materialize('{"foo": "bar"}' :: JSON)); +SELECT concat('With ', materialize('{"foo": "bar"}' :: Object('json'))); SELECT concat('With ', materialize((42, 'foo') :: Tuple(Int32, String))); SELECT concat('With ', materialize(map(42, 'foo') :: Map(Int32, String))); SELECT concat('With ', materialize('122.233.64.201' :: IPv4)); diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh index 09f20284402..989096a26d6 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh @@ -25,83 +25,83 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed WHERE a = 0 AND b | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complext expressions for constant folding - all should pass. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 1 AND a = 0 AND b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a IN (0, 1) AND b IN (0, 1); " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 1 AND b = 1; " # TODO: should pass one day. -#${CLICKHOUSE_CLIENT} -n --query=" +#${CLICKHOUSE_CLIENT} --query=" # SET optimize_skip_unused_shards = 1; # SELECT count(*) FROM distributed WHERE a = 0 AND b >= 0 AND b <= 1; #" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND c != 10; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 AND (a+b)*b != 12; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE (a = 0 OR a = 1) AND (b = 0 OR b = 1); " # These ones should fail. 
-${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed WHERE a = 0 AND b = 0 OR c = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh index 035907bddd7..b3dff2ea69a 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh @@ -30,73 +30,73 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count(*) FROM distributed_00754 PREWHERE a | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Should pass now -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0; " # Should still fail because of matching unavailable shard -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' # Try more complex expressions for constant folding - all should pass. 
-${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 AND a = 0 WHERE b = 0; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 1 WHERE b = 1 AND length(c) = 5; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) AND b IN (0, 1) WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a IN (0, 1) WHERE b IN (0, 1) AND c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 1 AND b = 1 WHERE c LIKE '%l%'; " -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE (a = 0 OR a = 1) WHERE (b = 0 OR b = 1); " # These should fail. -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b <= 1; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 WHERE c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 OR a = 1 AND b = 0; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR a = 2 AND b = 2; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET optimize_skip_unused_shards = 1; SELECT count(*) FROM distributed_00754 PREWHERE a = 0 AND b = 0 OR c LIKE '%l%'; " 2>&1 \ | grep -F -q "All connection tries failed" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/00761_lower_utf8_bug.sql b/tests/queries/0_stateless/00761_lower_utf8_bug.sql index de20b894331..a0ab55edc15 100644 --- a/tests/queries/0_stateless/00761_lower_utf8_bug.sql +++ b/tests/queries/0_stateless/00761_lower_utf8_bug.sql @@ -1 +1,4 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT lowerUTF8('\xF0') = lowerUTF8('\xF0'); diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference index a2178f5eda7..a6e03404f2b 100644 --- a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) 
CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(\n UInt16,\n UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) 2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) 3 ! 2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) diff --git a/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference b/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference index 31a4360469f..ff70403ce7a 100644 --- a/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference +++ b/tests/queries/0_stateless/00804_test_zstd_qat_codec_compression.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(ZSTD_QAT(1)),\n `data` String CODEC(ZSTD_QAT(1)),\n `ddd` Date CODEC(ZSTD_QAT(1)),\n `ddd32` Date32 CODEC(ZSTD_QAT(1)),\n `somenum` Float64 CODEC(ZSTD_QAT(1)),\n `somestr` FixedString(3) CODEC(ZSTD_QAT(1)),\n `othernum` Int64 CODEC(ZSTD_QAT(1)),\n `somearray` Array(UInt8) CODEC(ZSTD_QAT(1)),\n `somemap` Map(String, UInt32) CODEC(ZSTD_QAT(1)),\n `sometuple` Tuple(UInt16, UInt64) CODEC(ZSTD_QAT(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(ZSTD_QAT(1)),\n `data` String CODEC(ZSTD_QAT(1)),\n `ddd` Date CODEC(ZSTD_QAT(1)),\n `ddd32` Date32 CODEC(ZSTD_QAT(1)),\n `somenum` Float64 CODEC(ZSTD_QAT(1)),\n `somestr` FixedString(3) CODEC(ZSTD_QAT(1)),\n `othernum` Int64 CODEC(ZSTD_QAT(1)),\n `somearray` Array(UInt8) CODEC(ZSTD_QAT(1)),\n `somemap` Map(String, UInt32) CODEC(ZSTD_QAT(1)),\n `sometuple` Tuple(\n UInt16,\n UInt64) CODEC(ZSTD_QAT(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) 2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) 3 ! 
2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 12d889a7137..8f7a1a9ae98 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) SHARD=$($CLICKHOUSE_CLIENT --query "Select getMacro('shard')") REPLICA=$($CLICKHOUSE_CLIENT --query "Select getMacro('replica')") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS part_header_r1; DROP TABLE IF EXISTS part_header_r2; @@ -62,7 +62,7 @@ do [[ $count1 == 1 && $count2 == 1 ]] && break done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SELECT '*** Test part removal ***'; SELECT '*** replica 1 ***'; diff --git a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh index 5c21c70e06a..dd3735f27b1 100755 --- a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh +++ b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)' +${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&max_rows_to_read=0&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)' i=0 retries=300 while [[ $i -lt $retries ]]; do diff --git a/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh index e4de0b9ebfc..ff487f50ee0 100755 --- a/tests/queries/0_stateless/00837_minmax_index.sh +++ b/tests/queries/0_stateless/00837_minmax_index.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh index b267b6a8eb3..a3aba4f26b6 100755 --- a/tests/queries/0_stateless/00838_unique_index.sh +++ b/tests/queries/0_stateless/00838_unique_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00906_low_cardinality_cache.sql b/tests/queries/0_stateless/00906_low_cardinality_cache.sql index 55eacd0db44..337fba865fd 100644 --- a/tests/queries/0_stateless/00906_low_cardinality_cache.sql +++ b/tests/queries/0_stateless/00906_low_cardinality_cache.sql @@ -1,3 +1,6 @@ +-- Tags: long + +SET max_rows_to_read = '100M'; drop table if exists lc_00906; create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into lc_00906 
select '0123456789' from numbers(100000000); diff --git a/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh index 3707aaf2ca6..bdd0f36346f 100755 --- a/tests/queries/0_stateless/00907_set_index_max_rows.sh +++ b/tests/queries/0_stateless/00907_set_index_max_rows.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index 25a6567b894..3bd169dd6df 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -12,7 +12,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx3;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, @@ -22,7 +22,7 @@ CREATE TABLE bloom_filter_idx ORDER BY k SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi';" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx2 ( k UInt64, @@ -109,7 +109,7 @@ $CLICKHOUSE_CLIENT --optimize_or_like_chain 0 --query="SELECT count() FROM bloom # TOKEN BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx3 ( k UInt64, @@ -147,7 +147,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx2" $CLICKHOUSE_CLIENT --query="DROP TABLE bloom_filter_idx3" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx_na;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx_na ( na Array(Array(String)), @@ -156,7 +156,7 @@ CREATE TABLE bloom_filter_idx_na ORDER BY na" 2>&1 | grep -c 'DB::Exception: Unexpected type Array(Array(String)) of bloom filter index' # NGRAM BF with IPv6 -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_ipv6_idx ( foo IPv6, diff --git a/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh index 6ebb30c25b9..e1e23639e85 100755 --- a/tests/queries/0_stateless/00942_mutate_index.sh +++ b/tests/queries/0_stateless/00942_mutate_index.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh index 6ff7d34a9d7..e4a585fce97 100755 --- a/tests/queries/0_stateless/00943_materialize_index.sh +++ b/tests/queries/0_stateless/00943_materialize_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, @@ -34,7 +34,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO minmax_idx VALUES $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" $CLICKHOUSE_CLIENT --query="SELECT count() FROM minmax_idx WHERE i64 = 2 SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0 FORMAT JSON" | 
grep "rows_read" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" ALTER TABLE minmax_idx ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1 SETTINGS mutations_sync = 2;" $CLICKHOUSE_CLIENT --query="ALTER TABLE minmax_idx MATERIALIZE INDEX idx IN PARTITION 1 SETTINGS mutations_sync = 2;" diff --git a/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh index 4655077960f..a12536da239 100755 --- a/tests/queries/0_stateless/00944_clear_index_in_partition.sh +++ b/tests/queries/0_stateless/00944_clear_index_in_partition.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git a/tests/queries/0_stateless/00961_check_table.reference b/tests/queries/0_stateless/00961_check_table.reference index a0a054898b9..686285bb6aa 100644 --- a/tests/queries/0_stateless/00961_check_table.reference +++ b/tests/queries/0_stateless/00961_check_table.reference @@ -14,4 +14,4 @@ ======== 201902_4_5_1 1 ======== -201801_1_1_0 1 +201801_1_1_2 1 diff --git a/tests/queries/0_stateless/00961_check_table.sql b/tests/queries/0_stateless/00961_check_table.sql index a6abe8103d5..fc3c5435670 100644 --- a/tests/queries/0_stateless/00961_check_table.sql +++ b/tests/queries/0_stateless/00961_check_table.sql @@ -39,6 +39,6 @@ CHECK TABLE mt_table PARTITION 201902 SETTINGS max_threads = 1; SELECT '========'; -CHECK TABLE mt_table PART '201801_1_1_0'; +CHECK TABLE mt_table PART '201801_1_1_2'; DROP TABLE IF EXISTS mt_table; diff --git a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh index e2ec7fd42e4..9e410f09b13 100755 --- a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh +++ b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" # NGRAM BF -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE bloom_filter_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh index 8892fb11752..0f29c3dd2f2 100755 --- a/tests/queries/0_stateless/00965_set_index_string_functions.sh +++ b/tests/queries/0_stateless/00965_set_index_string_functions.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE set_idx ( k UInt64, diff --git a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh index 389d433c7e2..ba260042f47 100755 --- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh +++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS lowString;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS string;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" create table lowString ( a LowCardinality(String), @@ -18,7 +18,7 @@ ENGINE = MergeTree() PARTITION BY toYYYYMM(b) ORDER BY (a)" -$CLICKHOUSE_CLIENT -n --query=" 
+$CLICKHOUSE_CLIENT --query=" create table string ( a String, diff --git a/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql index cd2f65eb94a..1f90641726f 100644 --- a/tests/queries/0_stateless/00974_query_profiler.sql +++ b/tests/queries/0_stateless/00974_query_profiler.sql @@ -17,6 +17,7 @@ SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FRO SET query_profiler_real_time_period_ns = 0; SET query_profiler_cpu_time_period_ns = 1000000; SET log_queries = 1; +SET max_rows_to_read = 0; SELECT count(), ignore('test cpu time query profiler') FROM numbers_mt(10000000000); SET log_queries = 0; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/00976_max_execution_speed.sql b/tests/queries/0_stateless/00976_max_execution_speed.sql index 52c3f05ff43..41374712724 100644 --- a/tests/queries/0_stateless/00976_max_execution_speed.sql +++ b/tests/queries/0_stateless/00976_max_execution_speed.sql @@ -1,2 +1,2 @@ -SET max_execution_speed = 1, max_execution_time = 3; +SET max_execution_speed = 1, max_execution_time = 3, max_rows_to_read = 0; SELECT count() FROM system.numbers; -- { serverError TIMEOUT_EXCEEDED } diff --git a/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql index 7192642bcde..e545dec90b7 100644 --- a/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql +++ b/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS mat_view; CREATE TABLE test1 (a LowCardinality(String)) ENGINE=MergeTree() ORDER BY a; CREATE TABLE test2 (a UInt64) engine=MergeTree() ORDER BY a; -CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') FROM test1; +CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') AS a FROM test1; DROP TABLE test_mv; DROP TABLE test1; diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh index 6a1d4ff5ccf..d79472aa5a5 100755 --- a/tests/queries/0_stateless/00990_hasToken.sh +++ b/tests/queries/0_stateless/00990_hasToken.sh @@ -6,4 +6,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We should have correct env vars from shell_config.sh to run this test -python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -nm +python3 "$CURDIR"/00990_hasToken.python | ${CLICKHOUSE_CLIENT} --max_query_size 1048576 -m diff --git a/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql index a090be85221..b8f2596f3d5 100644 --- a/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql +++ b/tests/queries/0_stateless/01010_pmj_right_table_memory_limits.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel, no-fasttest, no-random-settings +SET max_bytes_in_join = 0; +SET max_rows_in_join = 0; SET max_memory_usage = 32000000; SET join_on_disk_max_files_to_merge = 4; diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 55bbfb3ff11..54f1bbe29dc 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) R1=table_1013_1 R2=table_1013_2 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R1; DROP TABLE IF 
EXISTS $R2; @@ -19,13 +19,13 @@ ${CLICKHOUSE_CLIENT} -n -q " INSERT INTO $R1 VALUES (1) " -timeout 10s ${CLICKHOUSE_CLIENT} -n -q " +timeout 10s ${CLICKHOUSE_CLIENT} -q " SET receive_timeout=1; SYSTEM SYNC REPLICA $R2 " 2>&1 | grep -F -q "Code: 159. DB::Exception" && echo 'OK' || echo 'Failed!' # By dropping tables all related SYNC REPLICA queries would be terminated as well -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; " diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 4f35b69da0b..053fd9d9d49 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -9,7 +9,7 @@ R1=table_1017_1 R2=table_1017_2 T1=table_1017_merge -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R1; DROP TABLE IF EXISTS $R2; @@ -68,7 +68,7 @@ ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE ${CLICKHOUSE_CLIENT} --query "ALTER TABLE $R1 DELETE WHERE dictHas('${CLICKHOUSE_DATABASE}.dict1', toUInt64(x))" --allow_nondeterministic_mutations=1 2>&1 \ && echo 'OK' || echo 'FAIL' -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS dict1; DROP TABLE IF EXISTS $R2; DROP TABLE IF EXISTS $R1; diff --git a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql index c13a0859183..eca370d94af 100644 --- a/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql +++ b/tests/queries/0_stateless/01017_uniqCombined_memory_usage.sql @@ -7,7 +7,8 @@ -- sizeof(HLL) is (2^K * 6 / 8) -- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400 -SET use_uncompressed_cache = 0; +SET use_uncompressed_cache = 0; +SET memory_profiler_step = 1; -- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements SELECT 'UInt32'; @@ -31,14 +32,14 @@ SELECT 'K=16'; SELECT 'UInt32'; SET max_memory_usage = 2000000; SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } -SET max_memory_usage = 4915200; +SET max_memory_usage = 5230000; SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); -- HashTable for UInt64 (used until (1<<11) elements), hence 2048 elements SELECT 'UInt64'; SET max_memory_usage = 2000000; SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } -SET max_memory_usage = 4915200; +SET max_memory_usage = 5900000; SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); SELECT 'K=18'; diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh index 9a26f78a8ee..5c67fe08fbf 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_all.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON" "POLYGON_SIMPLE" "POLYGON_INDEX_EACH" 
"POLYGON tar -xf "${CURDIR}"/01037_test_data_search.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS points; CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --max_insert_block_si rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -43,7 +43,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh index 47f7a5c1c4f..591978d1129 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_correctness_fast.sh @@ -14,7 +14,7 @@ declare -a SearchTypes=("POLYGON_INDEX_EACH" "POLYGON_INDEX_CELL") tar -xf "${CURDIR}"/01037_test_data_perf.tar.gz -C "${DATA_DIR}" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE points (x Float64, y Float64) ENGINE = Memory; " @@ -22,7 +22,7 @@ $CLICKHOUSE_CLIENT --query="INSERT INTO points FORMAT TSV" --min_chunk_bytes_for rm "${DATA_DIR}"/01037_point_data -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array @@ -42,7 +42,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array diff --git a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh index d1ee3f283bc..ac033ff4eb8 100755 --- a/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh +++ b/tests/queries/0_stateless/01037_polygon_dicts_simple_functions.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) TMP_DIR=${CLICKHOUSE_TMP}/tmp mkdir -p $TMP_DIR -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS polygons_array; CREATE TABLE polygons_array (key Array(Array(Array(Array(Float64)))), name String, value UInt64) ENGINE = Memory; @@ -53,7 +53,7 @@ for type in "${SearchTypes[@]}"; do outputFile="${TMP_DIR}/results${type}.out" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP DICTIONARY IF EXISTS dict_array; CREATE DICTIONARY dict_array ( @@ -106,7 +106,7 @@ do diff -q "${CURDIR}/01037_polygon_dicts_simple_functions.ans" "$outputFile" done -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DICTIONARY dict_array; DROP DICTIONARY dict_tuple; DROP TABLE polygons_array; diff --git a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh index 0b14ef8f6fa..29ce4da02ed 100755 --- a/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh +++ b/tests/queries/0_stateless/01055_minmax_index_compact_parts.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE minmax_idx ( u64 UInt64, diff --git 
a/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh index ffbe3692b64..f103692de56 100755 --- a/tests/queries/0_stateless/01077_mutations_index_consistency.sh +++ b/tests/queries/0_stateless/01077_mutations_index_consistency.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS movement" -$CLICKHOUSE_CLIENT -n --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" +$CLICKHOUSE_CLIENT --query "CREATE TABLE movement (date DateTime('Asia/Istanbul')) Engine = MergeTree ORDER BY (toStartOfHour(date)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';" $CLICKHOUSE_CLIENT --query "insert into movement select toDateTime('2020-01-22 00:00:00', 'Asia/Istanbul') + number%(23*3600) from numbers(1000000);" $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE movement FINAL" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -26,7 +26,7 @@ ORDER BY Hour DESC $CLICKHOUSE_CLIENT --query "alter table movement delete where date >= toDateTime('2020-01-22T16:00:00', 'Asia/Istanbul') and date < toDateTime('2020-01-22T17:00:00', 'Asia/Istanbul') SETTINGS mutations_sync = 2" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -37,7 +37,7 @@ ORDER BY Hour DESC " | grep "16:00:00" | wc -l -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour @@ -48,7 +48,7 @@ ORDER BY Hour DESC " | grep "22:00:00" | cut -f1 -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " SELECT count(), toStartOfHour(date) AS Hour diff --git a/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql index 21a84bdd691..45f1a00ae23 100644 --- a/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql +++ b/tests/queries/0_stateless/01091_query_profiler_does_not_hang.sql @@ -1,4 +1,4 @@ -- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug -SET query_profiler_cpu_time_period_ns = 1; +SET query_profiler_cpu_time_period_ns = 1, max_rows_to_read = 0; SELECT count() FROM numbers_mt(1000000000); diff --git a/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql b/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql index a53b60a5ad3..2b301d7aced 100644 --- a/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql +++ b/tests/queries/0_stateless/01119_optimize_trivial_insert_select.sql @@ -1,8 +1,9 @@ drop table if exists t; create table t(n int, a Int64, s String) engine = MergeTree() order by a; -set enable_positional_arguments=0; -set optimize_trivial_insert_select=1; +set enable_positional_arguments = 0; +set optimize_trivial_insert_select = 1; +set max_rows_to_read = 0; -- due to aggregate functions, optimize_trivial_insert_select will not be applied insert into t select 1, sum(number) as c, getSetting('max_threads') from numbers_mt(100000000) settings max_insert_threads=4, max_threads=2; diff --git a/tests/queries/0_stateless/01119_session_log.sql b/tests/queries/0_stateless/01119_session_log.sh old mode 100644 new mode 100755 similarity index 74% rename from tests/queries/0_stateless/01119_session_log.sql rename to tests/queries/0_stateless/01119_session_log.sh index 
55f6228797a..2d17b545276 --- a/tests/queries/0_stateless/01119_session_log.sql +++ b/tests/queries/0_stateless/01119_session_log.sh @@ -1,5 +1,20 @@ --- Tags: no-fasttest +#!/usr/bin/env bash +# Tags: no-fasttest +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +session_log_query_prefix=" +system flush logs; +select distinct type, user, auth_type, toString(client_address)!='::ffff:0.0.0.0' as a, client_port!=0 as b, interface from system.session_log +where user in ('default', 'nonexistsnt_user_1119', ' ', ' INTERSERVER SECRET ') +and interface in ('HTTP', 'TCP', 'TCP_Interserver') +and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) +and event_time >= now() - interval 5 minute" + +$CLICKHOUSE_CLIENT -nm -q " select * from remote('127.0.0.2', system, one, 'default', ''); select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED } select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED } @@ -16,9 +31,17 @@ select * from url('http://127.0.0.1:8123/?query=select+1&user=+++', LineAsString select * from cluster('test_cluster_interserver_secret', system, one); -system flush logs; -select distinct type, user, auth_type, toString(client_address)!='::ffff:0.0.0.0' as a, client_port!=0 as b, interface from system.session_log -where user in ('default', 'nonexistsnt_user_1119', ' ', ' INTERSERVER SECRET ') -and interface in ('HTTP', 'TCP', 'TCP_Interserver') -and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) -and event_time >= now() - interval 5 minute order by type, user, interface; +$session_log_query_prefix and type != 'Logout' order by type, user, interface; +" + +# Wait for logout events. 
+for _ in {1..10} +do + if [ "`$CLICKHOUSE_CLIENT -q "$session_log_query_prefix and type = 'Logout'" | wc -l`" -eq 3 ] + then + break + fi + sleep 2 +done + +$CLICKHOUSE_CLIENT -q "$session_log_query_prefix and type = 'Logout' order by user, interface" diff --git a/tests/queries/0_stateless/01160_table_dependencies.sh b/tests/queries/0_stateless/01160_table_dependencies.sh index acb6522e9e2..b72acf62610 100755 --- a/tests/queries/0_stateless/01160_table_dependencies.sh +++ b/tests/queries/0_stateless/01160_table_dependencies.sh @@ -35,7 +35,7 @@ arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from s $CLICKHOUSE_CLIENT -q "select '====='" $CLICKHOUSE_CLIENT -q "alter table t add column x int default in(1, $CLICKHOUSE_DATABASE.s), drop column y" -$CLICKHOUSE_CLIENT -q "create materialized view mv to s as select n from t where n in (select n from join)" +$CLICKHOUSE_CLIENT -q "create materialized view mv to s as select n as x from t where n in (select n from join)" $CLICKHOUSE_CLIENT -q "select table, arraySort(dependencies_table), arraySort(loading_dependencies_table), arraySort(loading_dependent_table) from system.tables where database=currentDatabase() order by table" diff --git a/tests/queries/0_stateless/01161_all_system_tables.sh b/tests/queries/0_stateless/01161_all_system_tables.sh index 739df782a39..d4a80d074dc 100755 --- a/tests/queries/0_stateless/01161_all_system_tables.sh +++ b/tests/queries/0_stateless/01161_all_system_tables.sh @@ -19,7 +19,7 @@ function run_selects() thread_num=$1 readarray -t tables_arr < <(${CLICKHOUSE_CLIENT} -q "SELECT database || '.' || name FROM system.tables WHERE database in ('system', 'information_schema', 'INFORMATION_SCHEMA') and name != 'zookeeper' and name != 'models' - AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num AND name NOT LIKE '%\\_sender' AND name NOT LIKE '%\\_watcher'") + AND sipHash64(name || toString($RAND)) % $THREADS = $thread_num AND name NOT LIKE '%\\_sender' AND name NOT LIKE '%\\_watcher' AND name != 'coverage_log'") for t in "${tables_arr[@]}" do diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index 42f596c45d6..f6c6fd0be34 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -7,11 +7,11 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -m -q "select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" +$CLICKHOUSE_CLIENT -m -q "select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='default'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" 2>&1| grep -Fa "Cannot modify 'send_logs_level' setting in readonly mode" > /dev/null && echo "OK" CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=fatal/g') -$CLICKHOUSE_CLIENT -n -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" +$CLICKHOUSE_CLIENT -m -q "set profile='readonly'; select value, changed from system.settings where name='readonly';" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=select+value,changed+from+system.settings+where+name='readonly'" ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&profile=default&query=select+value,changed+from+system.settings+where+name='readonly'" diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index c5012dabc81..be95e5a7d4b 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,6 +27,7 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; +DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; diff --git a/tests/queries/0_stateless/01245_limit_infinite_sources.sql b/tests/queries/0_stateless/01245_limit_infinite_sources.sql index 05680d86a33..69c93baf8a8 100644 --- a/tests/queries/0_stateless/01245_limit_infinite_sources.sql +++ b/tests/queries/0_stateless/01245_limit_infinite_sources.sql @@ -9,3 +9,4 @@ FROM ) WHERE number = 1 LIMIT 1 +SETTINGS max_rows_to_read = 0; diff --git a/tests/queries/0_stateless/01249_flush_interactive.sh b/tests/queries/0_stateless/01249_flush_interactive.sh index 551e11c8c8d..775b7825a16 100755 --- a/tests/queries/0_stateless/01249_flush_interactive.sh +++ b/tests/queries/0_stateless/01249_flush_interactive.sh @@ -14,10 +14,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test() { - timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --query " + timeout 5 ${CLICKHOUSE_LOCAL} --max_execution_time 10 --max_rows_to_read 0 --query " SELECT DISTINCT number % 5 FROM system.numbers" ||: echo -e '---' - timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10" --data-binary " + timeout 5 ${CLICKHOUSE_CURL} -sS --no-buffer "${CLICKHOUSE_URL}&max_execution_time=10&max_rows_to_read=0" --data-binary " SELECT DISTINCT number % 5 FROM system.numbers" ||: echo -e '---' } diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh 
b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 5f82731c54e..1ec53399958 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -10,7 +10,7 @@ set -o pipefail # shellcheck disable=SC2120 function execute() { - ${CLICKHOUSE_CLIENT} -n "$@" + ${CLICKHOUSE_CLIENT} "$@" } # diff --git a/tests/queries/0_stateless/01278_random_string_utf8.sql b/tests/queries/0_stateless/01278_random_string_utf8.sql index da2dc48c3e1..290d6a0c759 100644 --- a/tests/queries/0_stateless/01278_random_string_utf8.sql +++ b/tests/queries/0_stateless/01278_random_string_utf8.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT lengthUTF8(randomStringUTF8(100)); SELECT toTypeName(randomStringUTF8(10)); diff --git a/tests/queries/0_stateless/01293_show_settings.reference b/tests/queries/0_stateless/01293_show_settings.reference index 187f55697e4..c4c3473ee18 100644 --- a/tests/queries/0_stateless/01293_show_settings.reference +++ b/tests/queries/0_stateless/01293_show_settings.reference @@ -5,5 +5,7 @@ connect_timeout_with_failover_secure_ms Milliseconds 3000 external_storage_connect_timeout_sec UInt64 10 s3_connect_timeout_ms UInt64 1000 filesystem_prefetch_max_memory_usage UInt64 1073741824 +max_memory_usage UInt64 5000000000 +max_memory_usage_for_user UInt64 32000000000 max_untracked_memory UInt64 1048576 memory_profiler_step UInt64 1048576 diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference index b20e7415f52..6282bf366d0 100644 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.reference @@ -1,2 +1,2 @@ -Memory limit (for query) exceeded +Memory limit exceeded Ok diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh index d74092d828d..5b7cba77432 100755 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) start=$SECONDS # If the memory leak exists, it will lead to OOM fairly quickly. for _ in {1..1000}; do - $CLICKHOUSE_CLIENT --max_memory_usage 1G <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10"; + $CLICKHOUSE_CLIENT --max_memory_usage 1G --max_rows_to_read 0 <<< "SELECT uniqExactState(number) FROM system.numbers_mt GROUP BY number % 10"; # NOTE: we cannot use timeout here since this will not guarantee that the query will be executed at least once. 
# (since graceful wait of clickhouse-client had been reverted) @@ -16,5 +16,5 @@ for _ in {1..1000}; do if [[ $elapsed -gt 30 ]]; then break fi -done 2>&1 | grep -o -F 'Memory limit (for query) exceeded' | uniq +done 2>&1 | grep -o -P 'Memory limit .+ exceeded' | sed -r -e 's/(Memory limit)(.+)( exceeded)/\1\3/' | uniq echo 'Ok' diff --git a/tests/queries/0_stateless/01304_direct_io_long.sh b/tests/queries/0_stateless/01304_direct_io_long.sh index 1241f299d94..867c37667fe 100755 --- a/tests/queries/0_stateless/01304_direct_io_long.sh +++ b/tests/queries/0_stateless/01304_direct_io_long.sh @@ -1,18 +1,19 @@ #!/usr/bin/env bash -# Tags: long, no-object-storage-with-slow-build +# Tags: long, no-object-storage-with-slow-build, no-flaky-check +# It can be too long with ThreadFuzzer CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --query " +$CLICKHOUSE_CLIENT --max_rows_to_read 50M --query " DROP TABLE IF EXISTS bug; CREATE TABLE bug (UserID UInt64, Date Date) ENGINE = MergeTree ORDER BY Date SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', merge_max_block_size = 8192; INSERT INTO bug SELECT rand64(), '2020-06-07' FROM numbers(50000000); OPTIMIZE TABLE bug FINAL;" LOG="$CLICKHOUSE_TMP/err-$CLICKHOUSE_DATABASE" -$CLICKHOUSE_BENCHMARK --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG" +$CLICKHOUSE_BENCHMARK --max_rows_to_read 51M --iterations 10 --max_threads 100 --min_bytes_to_use_direct_io 1 <<< "SELECT sum(UserID) FROM bug PREWHERE NOT ignore(Date)" 1>/dev/null 2>"$LOG" cat "$LOG" | grep Exception cat "$LOG" | grep Loaded diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index db986e74b6b..02aa0f76be5 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -12,8 +12,8 @@ DATA_SIZE=200 SEQ=$(seq 0 $(($NUM_REPLICAS - 1))) -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE IF EXISTS r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "CREATE TABLE r$REPLICA (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r$REPLICA') ORDER BY x SETTINGS min_bytes_for_wide_part = '10M';"; done function thread() { @@ -30,6 +30,6 @@ done wait -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SYSTEM SYNC REPLICA r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "SELECT count(), sum(x) FROM r$REPLICA"; done -for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT -n --query "DROP TABLE r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "SELECT count(), sum(x) FROM r$REPLICA"; done +for REPLICA in $SEQ; do $CLICKHOUSE_CLIENT --query "DROP TABLE r$REPLICA"; done diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh index b7c8768a65d..97467c3ce9d 100755 
--- a/tests/queries/0_stateless/01415_sticking_mutations.sh +++ b/tests/queries/0_stateless/01415_sticking_mutations.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations" function check_sticky_mutations() { - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE sticking_mutations ( + $CLICKHOUSE_CLIENT --query "CREATE TABLE sticking_mutations ( date Date, key UInt64, value1 String, diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.reference b/tests/queries/0_stateless/01431_utf8_ubsan.reference index c98c950d535..dc785e57851 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.reference +++ b/tests/queries/0_stateless/01431_utf8_ubsan.reference @@ -1,2 +1,2 @@ -FF -FF +EFBFBD +EFBFBD diff --git a/tests/queries/0_stateless/01431_utf8_ubsan.sql b/tests/queries/0_stateless/01431_utf8_ubsan.sql index d6a299225b1..3a28e023805 100644 --- a/tests/queries/0_stateless/01431_utf8_ubsan.sql +++ b/tests/queries/0_stateless/01431_utf8_ubsan.sql @@ -1,2 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT hex(lowerUTF8('\xFF')); SELECT hex(upperUTF8('\xFF')); diff --git a/tests/queries/0_stateless/01451_dist_logs.sh b/tests/queries/0_stateless/01451_dist_logs.sh index 23dee7a827d..e281e232bb5 100755 --- a/tests/queries/0_stateless/01451_dist_logs.sh +++ b/tests/queries/0_stateless/01451_dist_logs.sh @@ -10,4 +10,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # triggered not for the first query for _ in {1..20}; do echo "select * from remote('127.{2,3}', system.numbers) where number = 10 limit 1;" -done | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +done | ${CLICKHOUSE_CLIENT} 2>/dev/null diff --git a/tests/queries/0_stateless/01458_named_tuple_millin.reference b/tests/queries/0_stateless/01458_named_tuple_millin.reference index 954dfe36563..86561570985 100644 --- a/tests/queries/0_stateless/01458_named_tuple_millin.reference +++ b/tests/queries/0_stateless/01458_named_tuple_millin.reference @@ -1,12 +1,16 @@ CREATE TABLE default.tuple ( - `j` Tuple(a Int8, b String) + `j` Tuple( + a Int8, + b String) ) ENGINE = Memory j Tuple(\n a Int8,\n b String) CREATE TABLE default.tuple ( - `j` Tuple(a Int8, b String) + `j` Tuple( + a Int8, + b String) ) ENGINE = Memory j Tuple(\n a Int8,\n b String) diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index 56620d848a3..cc574557438 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -10,7 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=10 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -31,7 +31,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index 91a73471557..24ea3ba3835 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -16,7 +16,7 @@ unset CLICKHOUSE_WRITE_COVERAGE NUM_REPLICAS=10 for i in $(seq 1 
$NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -39,7 +39,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh index 1f76a2efc6b..a2ef0d52328 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum_detach_attach.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x; " @@ -36,7 +36,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i;" done diff --git a/tests/queries/0_stateless/01485_256_bit_multiply.sql b/tests/queries/0_stateless/01485_256_bit_multiply.sql index 5c8c47c9127..a4e99d51970 100644 --- a/tests/queries/0_stateless/01485_256_bit_multiply.sql +++ b/tests/queries/0_stateless/01485_256_bit_multiply.sql @@ -1,5 +1,7 @@ -- Tags: no-random-settings, no-asan, no-msan, no-tsan, no-ubsan, no-debug +SET max_rows_to_read = '100M'; + select count() from ( select toInt128(number) * number x, toInt256(number) * number y from numbers_mt(100000000) where x != y diff --git a/tests/queries/0_stateless/01504_compression_multiple_streams.reference b/tests/queries/0_stateless/01504_compression_multiple_streams.reference index 4d3aba66526..14cdce72044 100644 --- a/tests/queries/0_stateless/01504_compression_multiple_streams.reference +++ b/tests/queries/0_stateless/01504_compression_multiple_streams.reference @@ -1,20 +1,20 @@ 1 1 [[1]] (1,[1]) 1 1 [[1]] (1,[1]) -CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(T64, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(T64, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) -CREATE TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 +CREATE 
TABLE default.columns_with_multiple_streams\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) 3 3 [[3]] (3,[3]) 1 1 [[1]] (1,[1]) 1 1 [[1]] (1,[1]) -CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) -CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 +CREATE TABLE default.columns_with_multiple_streams_compact\n(\n `field0` Nullable(Int64) CODEC(Delta(2), LZ4),\n `field1` Nullable(UInt8) CODEC(Delta(8), LZ4),\n `field2` Array(Array(Int64)) CODEC(Delta(8), LZ4),\n `field3` Tuple(\n UInt32,\n Array(UInt64)) CODEC(Delta, Default)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000, index_granularity = 8192 1 1 [[1]] (1,[1]) 2 2 [[2]] (2,[2]) 3 3 [[3]] (3,[3]) diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 29593ea4fb5..6954fef7314 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -34,7 +34,7 @@ done # Check access rights -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP DATABASE IF EXISTS test; CREATE DATABASE test; USE test; diff --git a/tests/queries/0_stateless/01508_format_regexp_raw.sh b/tests/queries/0_stateless/01508_format_regexp_raw.sh index 8cf1bd73566..52613c28b2f 100755 --- a/tests/queries/0_stateless/01508_format_regexp_raw.sh +++ b/tests/queries/0_stateless/01508_format_regexp_raw.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b String) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b String) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01509_dictionary_preallocate.sh b/tests/queries/0_stateless/01509_dictionary_preallocate.sh index 2a22a307a08..0459f69b0ad 100755 --- a/tests/queries/0_stateless/01509_dictionary_preallocate.sh +++ b/tests/queries/0_stateless/01509_dictionary_preallocate.sh @@ -15,7 +15,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # PREALLOCATE attribute (and also for the history/greppability, that it was # such). -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data_01509; DROP DICTIONARY IF EXISTS dict_01509; CREATE TABLE data_01509 diff --git a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh index 594caca7d04..dc178d081bf 100755 --- a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh +++ b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t; CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; " @@ -12,7 +12,7 @@ CREATE TABLE t (a String, b LowCardinality(Nullable(String))) ENGINE = Memory; ${CLICKHOUSE_CLIENT} --format_regexp_escaping_rule 'Raw' --format_regexp '^(.+?) separator (.+?)$' --query ' INSERT INTO t FORMAT Regexp abc\ separator Hello, world!' 
-${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t; DROP TABLE t; " diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh index e77764ee34e..8ba27a04d60 100755 --- a/tests/queries/0_stateless/01526_initial_query_id.sh +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CURL} \ --get \ --data-urlencode "query=select 1 format Null" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " system flush logs; select interface, initial_query_id = query_id from system.query_log diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference index c6c4dcdfa4a..c23cc57548b 100644 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.reference +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.reference @@ -1,12 +1,13 @@ CREATE TABLE test ( `a` Int64, - `b` NESTED(a Int64) + `b` Nested(a Int64) ) ENGINE = TinyLog CREATE TABLE test ( `a` Int64, - `b` TUPLE(a Int64) + `b` Tuple( + a Int64) ) ENGINE = TinyLog diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh index 99e3aed2825..9065af17dc1 100755 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh @@ -4,6 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "CREATE TABLE test(a Int64, b NESTED(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT +echo "CREATE TABLE test(a Int64, b Nested(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT -echo "CREATE TABLE test(a Int64, b TUPLE(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT \ No newline at end of file +echo "CREATE TABLE test(a Int64, b Tuple(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT \ No newline at end of file diff --git a/tests/queries/0_stateless/01561_mann_whitney_scipy.python b/tests/queries/0_stateless/01561_mann_whitney_scipy.python index 4713120287d..0f84d510933 100644 --- a/tests/queries/0_stateless/01561_mann_whitney_scipy.python +++ b/tests/queries/0_stateless/01561_mann_whitney_scipy.python @@ -19,7 +19,13 @@ def test_and_check(name, a, b, t_stat, p_value): ) client.query( "INSERT INTO mann_whitney VALUES {};".format( - ", ".join(["({},{}), ({},{})".format(i, 0, j, 1) for i, j in zip(a, b)]) + ", ".join(["({},{})".format(i, 0) for i in a]) + ) + ) + + client.query( + "INSERT INTO mann_whitney VALUES {};".format( + ", ".join(["({},{})".format(i, 1) for i in b]) ) ) @@ -59,6 +65,15 @@ def test_mann_whitney(): test_and_check("mannWhitneyUTest('greater')", rvs1, rvs2, s, p) +def test_mann_whitney_skew(): + rvs1 = [1] + rvs2 = [0, 2, 4] + s, p = stats.mannwhitneyu(rvs1, rvs2, alternative="two-sided") + test_and_check("mannWhitneyUTest", rvs1, rvs2, s, p) + test_and_check("mannWhitneyUTest('two-sided')", rvs1, rvs2, s, p) + + if __name__ == "__main__": test_mann_whitney() + test_mann_whitney_skew() print("Ok.") diff --git a/tests/queries/0_stateless/01590_countSubstrings.sql b/tests/queries/0_stateless/01590_countSubstrings.sql index b38cbb7d188..5ec4f412d7f 100644 --- a/tests/queries/0_stateless/01590_countSubstrings.sql +++ b/tests/queries/0_stateless/01590_countSubstrings.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: 
upper/lowerUTF8 use ICU + -- -- countSubstrings -- diff --git a/tests/queries/0_stateless/01599_mutation_query_params.sh b/tests/queries/0_stateless/01599_mutation_query_params.sh index 52b0131a9c2..5b604c96028 100755 --- a/tests/queries/0_stateless/01599_mutation_query_params.sh +++ b/tests/queries/0_stateless/01599_mutation_query_params.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS test; CREATE TABLE test diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index a07dd306b3e..0a9f94cc451 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -11,33 +11,40 @@ function query() ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&database_atomic_wait_for_drop_and_detach_synchronously=1" -d "$*" } -# NOTE: database = $CLICKHOUSE_DATABASE is unwanted -verify_sql="SELECT - (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) - = (SELECT sum(active), sum(NOT active) FROM - (SELECT active FROM system.parts UNION ALL SELECT active FROM system.projection_parts UNION ALL SELECT 1 FROM system.dropped_tables_parts))" # The query is not atomic - it can compare states between system.parts and system.metrics from different points in time. # So, there is inherent race condition. But it should get expected result eventually. # In case of test failure, this code will do infinite loop and timeout. verify() { - for i in {1..5000} - do - result=$( query "$verify_sql" ) - [ "$result" = "1" ] && echo "$result" && break - sleep 0.1 + local result - if [[ $i -eq 5000 ]] - then - query " - SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; - SELECT sum(active), sum(NOT active) FROM system.parts; - SELECT sum(active), sum(NOT active) FROM system.projection_parts; - SELECT count() FROM system.dropped_tables_parts; - " + for _ in {1..100}; do + # NOTE: database = $CLICKHOUSE_DATABASE is unwanted + result=$( query "SELECT + (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) + = + (SELECT sum(active), sum(NOT active) FROM ( + SELECT active FROM system.parts + UNION ALL SELECT active FROM system.projection_parts + UNION ALL SELECT 1 FROM system.dropped_tables_parts + ))" + ) + + if [ "$result" = "1" ]; then + echo "$result" + return fi + + sleep 0.5 done + + $CLICKHOUSE_CLIENT -q " + SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; + SELECT sum(active), sum(NOT active) FROM system.parts; + SELECT sum(active), sum(NOT active) FROM system.projection_parts; + SELECT count() FROM system.dropped_tables_parts; + " } query "DROP TABLE IF EXISTS test_table" diff --git a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh index 1d768c8b027..834eba8f25c 100755 --- a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh +++ b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " CREATE USER quoted_by_ip_${CLICKHOUSE_DATABASE}; CREATE USER quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}; @@ -57,7 +57,7 @@ ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 5.6.7.8, 1.2.3.4' -sS "${CLICKHOUSE_URL} ${CLICKHOUSE_CURL} -H 'X-Forwarded-For: 1.2.3.4, 5.6.7.8' -sS "${CLICKHOUSE_URL}&user=quoted_by_forwarded_ip_${CLICKHOUSE_DATABASE}" -d "SELECT count() FROM numbers(10)" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP QUOTA IF EXISTS quota_by_ip_${CLICKHOUSE_DATABASE}; DROP QUOTA IF EXISTS quota_by_forwarded_ip; diff --git a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql index 8b97f3514b3..8a6fa9b7845 100644 --- a/tests/queries/0_stateless/01603_read_with_backoff_bug.sql +++ b/tests/queries/0_stateless/01603_read_with_backoff_bug.sql @@ -1,15 +1,17 @@ --- Tags: long, no-tsan, no-distributed-cache --- Tag no-tsan: Too long for TSan +-- Tags: long, no-tsan, no-msan, no-distributed-cache +-- Too long for TSan and MSan set enable_filesystem_cache=0; set enable_filesystem_cache_on_write_operations=0; +set max_rows_to_read = '30M'; + drop table if exists t; create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; INSERT INTO t SELECT number, if(number < (8129 * 1024), arrayStringConcat(arrayMap(x -> toString(x), range(number % 128)), ' '), '') -FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8; +FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0; -- optimize table t final; diff --git a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh index 0e5c2862066..6a7eb975c87 100755 --- a/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh +++ b/tests/queries/0_stateless/01684_ssd_cache_dictionary_simple_key.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DATABASE IF EXISTS 01684_database_for_cache_dictionary; CREATE DATABASE 01684_database_for_cache_dictionary; diff --git a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh index 55061b9a643..c2d222a86ea 100755 --- a/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh +++ b/tests/queries/0_stateless/01685_ssd_cache_dictionary_complex_key.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE complex_key_simple_attributes_source_table ( id UInt64, diff --git a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh index f8004f9350d..5d115e09a79 100755 --- a/tests/queries/0_stateless/01691_parser_data_type_exponential.sh +++ b/tests/queries/0_stateless/01691_parser_data_type_exponential.sh @@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Check that DataType parser does not have exponential complexity in the case found by fuzzer. 
-for _ in {1..10}; do ${CLICKHOUSE_CLIENT} -n --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 
222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done +for _ in {1..10}; do ${CLICKHOUSE_CLIENT} --query "SELECT CAST(1 AS A2222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222220000000000000000000000000000000000000000000000000000000000000000000000000000002260637443813394204 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 
222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggre222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 22222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 2222222222222eFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpio22222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 00000000000000000000000000000000000000000000000000000000000000000000000000000001841416382, 222222222222222ggregateFuncpion(groupBitmap22222222222222222222222222222222222222222222222222222222222222222222222200000000000000000000178859639454016722222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmapp, 222222222222222ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateF222222222222222222222222222222222222222222222222222222222teFuncpion(groupBitmap, 222222222222223ggregateFuncpion(groupBitmap2222222222222222222222222222222222222222222222222222 222222222222222222ggregateFuncpion(groupBitmap, 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222, 222222222222222ggregateFuncpion(groupBitmap222222222222222222222222222222222222222222222222222222222222222222222222000000000000000000001788596394540167623 222222222222222222ggregateFu22222222222222222222222222 222222222, UInt33)); -- { clientError 62 }"; done diff --git a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh index a166837e01a..f38e53f898a 100755 --- a/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_optimize_aggregation_in_order.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_01710; CREATE TABLE in_order_agg_01710 diff --git a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh index ee73974e8a4..01537524730 100755 --- a/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01710_projections_partial_optimize_aggregation_in_order.sh @@ -9,7 +9,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS in_order_agg_partial_01710; CREATE TABLE in_order_agg_partial_01710 diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index 6625ad916e8..83a26c83005 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -1,6 +1,7 @@ -- Tags: long, distributed, no-random-settings drop table if exists data_01730; +SET max_rows_to_read = 0, max_result_rows = 0, max_bytes_before_external_group_by = 0; -- does not use 127.1 due to prefer_localhost_replica diff --git a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh index 2a7345f4865..f9681ebe4f5 100755 --- a/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh +++ b/tests/queries/0_stateless/01753_optimize_aggregation_in_order.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_aggregation_in_order=1 -m -q " drop table if exists data_01753; create table data_01753 (key Int) engine=MergeTree() order by key as select * from numbers(8); select * from data_01753 group by key settings max_block_size=1; diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference index 74a0356b11e..fc1ddcae595 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.reference @@ -49,7 +49,7 @@ order by query; tuple(2) select 'optimize_skip_unused_shards_rewrite_in(2,)'; optimize_skip_unused_shards_rewrite_in(2,) -with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and @@ -59,10 +59,10 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_2%') and type = 'QueryFinish' order by query; - tuple(2) + (2) select 'optimize_skip_unused_shards_rewrite_in(0,)'; optimize_skip_unused_shards_rewrite_in(0,) -with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0,); +with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0); 0 0 system flush logs; select splitByString('IN', query)[-1] from system.query_log where @@ -73,7 +73,7 @@ select splitByString('IN', query)[-1] from system.query_log where query like concat('%', currentDatabase(), '%AS%id_00%') and type = 'QueryFinish' order by query; - tuple(0) + (0) -- signed column select 'signed column'; signed column diff --git a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql index bcbedeb3ada..0759fb93a44 100644 --- a/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql +++ b/tests/queries/0_stateless/01756_optimize_skip_unused_shards_rewrite_in.sql @@ -63,7 +63,7 @@ select splitByString('IN', query)[-1] from system.query_log where order by query; select 'optimize_skip_unused_shards_rewrite_in(2,)'; -with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2,); +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and @@ -75,7 +75,7 @@ select splitByString('IN', query)[-1] from system.query_log where order by query; select 'optimize_skip_unused_shards_rewrite_in(0,)'; -with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0,); +with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0); system flush logs; select splitByString('IN', query)[-1] from system.query_log where event_date >= yesterday() and diff --git a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh index b963f3a618f..3c9e12f780b 100755 --- 
a/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh +++ b/tests/queries/0_stateless/01758_optimize_skip_unused_shards_once.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -nm -q " +$CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -m -q " create table dist_01758 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); select * from dist_01758 where dummy = 0 format Null; " |& grep -o "StorageDistributed (dist_01758).*" diff --git a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh index 9c51b82282c..ee46f8194b9 100755 --- a/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh +++ b/tests/queries/0_stateless/01791_dist_INSERT_block_structure_mismatch.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -nm -q " +$CLICKHOUSE_CLIENT --prefer_localhost_replica=0 -m -q " DROP TABLE IF EXISTS tmp_01683; DROP TABLE IF EXISTS dist_01683; diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh index 4b75102e9cf..f3e8ceffff6 100755 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh @@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function setup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table if exists data_01814; drop table if exists dist_01814; @@ -24,7 +24,7 @@ function setup() function cleanup() { - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table data_01814; drop table dist_01814; " @@ -67,7 +67,7 @@ function test_distributed_push_down_limit_with_query_log() $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from $table group by key limit $offset, 10" - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system flush logs; select read_rows from system.query_log where diff --git a/tests/queries/0_stateless/01825_new_type_json_10.reference b/tests/queries/0_stateless/01825_new_type_json_10.reference new file mode 100644 index 00000000000..d70c8210914 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_10.reference @@ -0,0 +1,13 @@ +('a.b','Int64') +('a.c','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('d','Int64') +('e','Array(Nullable(Int64))') +('f','Int64') +{"o":{"a":{"b":"1","c":[{"d":"10","e":["31"]},{"d":"20","e":["63","127"]}]}}} +{"o":{"a":{"b":"2","c":[]}}} +{"o":{"a":{"b":"3","c":[{"e":["32"],"f":"20"},{"e":["64","128"],"f":"30"}]}}} +{"o":{"a":{"b":"4","c":[]}}} +1 [10,20] [[31],[63,127]] [NULL,NULL] +2 [] [] [] +3 [NULL,NULL] [[32],[64,128]] [20,30] +4 [] [] [] diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql new file mode 100644 index 00000000000..f586cc4477b --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_10; +CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; + +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 
127]}]}} {"a": {"b": 2, "c": []}} + +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(o)) as path FROM t_json_10 order by path; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(o.a.c.:`Array(JSON)`))) as path FROM t_json_10 order by path; +SELECT o FROM t_json_10 ORDER BY o.a.b FORMAT JSONEachRow; +SELECT o.a.b, o.a.c.:`Array(JSON)`.d, o.a.c.:`Array(JSON)`.e, o.a.c.:`Array(JSON)`.f FROM t_json_10 ORDER BY o.a.b; + +DROP TABLE t_json_10; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.reference b/tests/queries/0_stateless/01825_new_type_json_11.reference new file mode 100644 index 00000000000..aa3375a23cb --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_11.reference @@ -0,0 +1,13 @@ +('id','Int64') +('key_1','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('key_2','Int64') +('key_3','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') +('key_4','Array(JSON(max_dynamic_types=4, max_dynamic_paths=16))') +('key_7','Int64') +('key_5','Int64') +{"obj":{"id":"1","key_1":[{"key_2":"100","key_3":[{"key_4":[{"key_5":"-2"}],"key_7":"257"}]},{"key_2":"65536"}]}} +{"obj":{"id":"2","key_1":[{"key_2":"101","key_3":[{"key_4":[{"key_5":"-2"}]}]},{"key_2":"102","key_3":[{"key_7":"257"}]},{"key_2":"65536"}]}} +{"obj.key_1.:`Array(JSON)`.key_3":[[{"key_4":[{"key_5":"-2"}],"key_7":"257"}],null]} +{"obj.key_1.:`Array(JSON)`.key_3":[[{"key_4":[{"key_5":"-2"}]}],[{"key_7":"257"}],null]} +[[[-2]],[]] [[257],[]] +[[[-2]],[[]],[]] [[NULL],[257],[]] diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh new file mode 100755 index 00000000000..f448b7433ab --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_11" + +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat < notEmpty(x), outpoints)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS btc" + +rm ${CLICKHOUSE_USER_FILES_UNIQUE}/btc_transactions.json diff --git a/tests/queries/0_stateless/01825_new_type_json_distributed.reference b/tests/queries/0_stateless/01825_new_type_json_distributed.reference new file mode 100644 index 00000000000..b2cbe847542 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_distributed.reference @@ -0,0 +1,4 @@ +{"k1":"2","k2":{"k3":"qqq","k4":["44","55"]}} {'k1':'Int64','k2.k3':'String','k2.k4':'Array(Nullable(Int64))'} +{"k1":"2","k2":{"k3":"qqq","k4":["44","55"]}} {'k1':'Int64','k2.k3':'String','k2.k4':'Array(Nullable(Int64))'} +2 qqq [44,55] +2 qqq [44,55] diff --git a/tests/queries/0_stateless/01825_new_type_json_distributed.sql b/tests/queries/0_stateless/01825_new_type_json_distributed.sql new file mode 100644 index 00000000000..0fede046927 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_distributed.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; + +CREATE TABLE t_json_local(data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t_json_dist AS t_json_local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), t_json_local); + +INSERT INTO t_json_local FORMAT JSONAsObject {"k1": 2, "k2": {"k3": "qqq", "k4": [44, 55]}} +; + +SELECT data, JSONAllPathsWithTypes(data) FROM t_json_dist; +SELECT data.k1, data.k2.k3, data.k2.k4 FROM t_json_dist; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; diff --git a/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference new file mode 100644 index 00000000000..7efe8cea252 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ephemeral.reference @@ -0,0 +1 @@ +PushEvent some-repo {"actor":{"avatar_url":"https:\\/\\/avatars.githubusercontent.com\\/u\\/123213213?","display_login":"github-actions","gravatar_id":"","id":"123123123","login":"github-actions[bot]","url":"https:\\/\\/api.github.com\\/users\\/github-actions[bot]"},"created_at":"2022-01-04 07:00:00","repo":{"id":"1001001010101","name":"some-repo","url":"https:\\/\\/api.github.com\\/repos\\/some-repo"},"type":"PushEvent"} diff --git a/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql b/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql new file mode 100644 index 00000000000..4aaebfd326f --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ephemeral.sql @@ -0,0 +1,18 @@ + +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_github_json; + +CREATE table t_github_json +( + event_type LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'type'), + repo_name LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'repo', 'name'), + message JSON DEFAULT empty(message_raw) ? 
'{}' : message_raw, + message_raw String EPHEMERAL +) ENGINE = MergeTree ORDER BY (event_type, repo_name); + +INSERT INTO t_github_json (message_raw) FORMAT JSONEachRow {"message_raw": "{\"type\":\"PushEvent\", \"created_at\": \"2022-01-04 07:00:00\", \"actor\":{\"avatar_url\":\"https://avatars.githubusercontent.com/u/123213213?\",\"display_login\":\"github-actions\",\"gravatar_id\":\"\",\"id\":123123123,\"login\":\"github-actions[bot]\",\"url\":\"https://api.github.com/users/github-actions[bot]\"},\"repo\":{\"id\":1001001010101,\"name\":\"some-repo\",\"url\":\"https://api.github.com/repos/some-repo\"}}"} + +SELECT * FROM t_github_json ORDER BY event_type, repo_name; + +DROP TABLE t_github_json; diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.reference b/tests/queries/0_stateless/01825_new_type_json_ghdata.reference new file mode 100644 index 00000000000..ca2fb7e8ff9 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.reference @@ -0,0 +1,12 @@ +5000 +leonardomso/33-js-concepts 3 +ytdl-org/youtube-dl 3 +Bogdanp/neko 2 +bminossi/AllVideoPocsFromHackerOne 2 +disclose/diodata 2 +Commit 182 +chipeo345 119 +phanwi346 114 +Nicholas Piggin 95 +direwolf-github 49 +2 diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh new file mode 100755 index 00000000000..f165223fb98 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: too memory hungry + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \ + --max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsObject" + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" + +${CLICKHOUSE_CLIENT} -q \ +"SELECT data.repo.name, count() AS stars FROM ghdata \ + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + +${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ +"SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ + ARRAY JOIN data.payload.commits[].author.name \ + GROUP BY name ORDER BY c DESC, name LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh new file mode 100755 index 00000000000..b450e9827c2 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata_insert_select.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-s3-storage, long, no-asan +# ^ no-s3-storage: it is memory-hungry, no-asan: 
too long + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON(max_dynamic_paths=100)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2 FORMAT JSONAsObject" +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString" + +${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO ghdata_2_from_string SELECT data FROM ghdata_2_string" + +${CLICKHOUSE_CLIENT} -q "SELECT \ + (SELECT mapSort(groupUniqArrayMap(JSONAllPathsWithTypes(data))), sum(cityHash64(toString(data))) FROM ghdata_2_from_string) = \ + (SELECT mapSort(groupUniqArrayMap(JSONAllPathsWithTypes(data))), sum(cityHash64(toString(data))) FROM ghdata_2)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string" diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.reference b/tests/queries/0_stateless/01825_new_type_json_in_array.reference new file mode 100644 index 00000000000..aa33d9a7413 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.reference @@ -0,0 +1,30 @@ +{"id":1,"arr":[{"k1":"1","k2":{"k3":"2","k4":"3"}},{"k1":"2","k2":{"k5":"foo"}}]} +{"id":2,"arr":[{"k1":"3","k2":{"k3":"4","k4":"5"}}]} +1 [1,2] [2,NULL] [3,NULL] [NULL,'foo'] +2 [3] [4] [5] [NULL] +{"arr":{"k1":"1","k2":{"k3":"2","k4":"3"}}} +{"arr":{"k1":"2","k2":{"k5":"foo"}}} +{"arr":{"k1":"3","k2":{"k3":"4","k4":"5"}}} +('k1','Int64') +('k2.k3','Int64') +('k2.k4','Int64') +('k2.k5','String') +{"id":1,"arr":[{"k1":[{"k2":"aaa","k3":"bbb"},{"k2":"ccc"}]}]} +{"id":2,"arr":[{"k1":[{"k3":"ddd","k4":"10"},{"k4":"20"}],"k5":{"k6":"foo"}}]} +1 [['aaa','ccc']] [['bbb',NULL]] [[NULL,NULL]] [NULL] +2 [[NULL,NULL]] [['ddd',NULL]] [[10,20]] ['foo'] +{"k1":{"k2":"aaa","k3":"bbb"}} +{"k1":{"k2":"ccc"}} +{"k1":{"k3":"ddd","k4":"10"}} +{"k1":{"k4":"20"}} +('k2','String') +('k3','String') +('k4','Int64') +[['{"k2":"aaa","k3":"bbb"}','{"k2":"ccc"}']] +[['{"k3":"ddd","k4":"10"}','{"k4":"20"}']] +{"arr":[{"x":1}]} +{"arr":{"x":{"y":1},"t":{"y":2}}} +{"arr":[1,{"y":1}]} +{"arr":[2,{"y":2}]} +{"arr":[{"x":"aaa","y":["1","2","3"]}]} +{"arr":[{"x":1}]} diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql new file mode 100644 index 00000000000..42ab1f64681 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; +SET allow_experimental_analyzer = 1; +DROP TABLE IF 
EXISTS t_json_array; + +CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} + + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1, arr.k2.k3, arr.k2.k4, arr.k2.k5 FROM t_json_array ORDER BY id; +SELECT arr FROM t_json_array ARRAY JOIN arr ORDER BY arr.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arr))) as path FROM t_json_array order by path; + +TRUNCATE TABLE t_json_array; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1[].k2, arr.k1[].k3, arr.k1[].k4, arr.k5.k6 FROM t_json_array ORDER BY id; + +SELECT arrayJoin(arrayJoin(arr.k1[])) AS k1 FROM t_json_array ORDER BY toString(k1) FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arr.k1[])))) AS path FROM t_json_array order by path; + +SELECT arr.k1 FROM t_json_array GROUP BY arr.k1 ORDER BY toString(arr.k1); + +DROP TABLE t_json_array; + +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; +SELECT * FROM values('arr Map(String, JSON)', '{\'x\' : \'{"y" : 1}\', \'t\' : \'{"y" : 2}\'}') FORMAT JSONEachRow; +SELECT * FROM values('arr Tuple(Int32, JSON)', '(1, \'{"y" : 1}\')', '(2, \'{"y" : 2}\')') FORMAT JSONEachRow; +SELECT * FROM format(JSONEachRow, '{"arr" : [{"x" : "aaa", "y" : [1,2,3]}]}') FORMAT JSONEachRow; +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference b/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference new file mode 100644 index 00000000000..03913e5098e --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_other_types.reference @@ -0,0 +1,17 @@ +Tuple(String, Map(String, Array(JSON)), JSON) +============= +{"id":1,"data":["foo",{"aa":[{"k1":[{"k2":"1","k3":"2"},{"k3":"3"}]},{"k1":[{"k2":"4"},{"k3":"5"},{"k2":"6"}],"k4":"qqq"}],"bb":[{"k4":"www"},{"k1":[{"k2":"7","k3":"8"},{"k2":"9","k3":"10"},{"k2":"11","k3":"12"}]}]},{"k1":"aa","k2":{"k3":"bb","k4":"c"}}]} +{"id":2,"data":["bar",{"aa":[{"k1":[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}],"k4":"www"}]},{}]} +{"id":3,"data":["some",{"aa":[{"k1":[{"k3":"20","k5":"some"}]}]},{"k1":"eee"}]} +============= +{"aa":[{"k1":[{"k2":"1","k3":"2"},{"k3":"3"}]},{"k1":[{"k2":"4"},{"k3":"5"},{"k2":"6"}],"k4":"qqq"}],"bb":[{"k4":"www"},{"k1":[{"k2":"7","k3":"8"},{"k2":"9","k3":"10"},{"k2":"11","k3":"12"}]}]} +{"aa":[{"k1":[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}],"k4":"www"}],"bb":[]} +{"aa":[{"k1":[{"k3":"20","k5":"some"}]}],"bb":[]} +============= +{"k1":[[{"k2":"1","k3":"2"},{"k3":"3"}],[{"k2":"4"},{"k3":"5"},{"k2":"6"}]],"k4":[null,"qqq"]} +{"k1":[[{"k2":"13","k3":"14"},{"k2":"15","k3":"16"}]],"k4":["www"]} +{"k1":[[{"k3":"20","k5":"some"}]],"k4":[null]} +============= +{"obj":{"k1":"aa","k2":{"k3":"bb","k4":"c"}}} +{"obj":{}} +{"obj":{"k1":"eee"}} diff --git a/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh 
b/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh new file mode 100755 index 00000000000..1c7b64c73a1 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_in_other_types.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_nested" + +${CLICKHOUSE_CLIENT} -q " + CREATE TABLE t_json_nested + ( + id UInt32, + data Tuple(String, Map(String, Array(JSON)), JSON) + ) + ENGINE = MergeTree ORDER BY id" --allow_experimental_json_type 1 + +cat < 1; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +INSERT INTO type_json_dst VALUES (4, '{"arr": [{"k11": 5, "k22": 6}, {"k11": 7, "k33": 8}]}'); + +INSERT INTO type_json_src VALUES (5, '{"arr": "not array"}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +TRUNCATE TABLE type_json_src; +INSERT INTO type_json_src VALUES (6, '{"arr": [{"k22": "str1"}]}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; + +CREATE TABLE type_json_dst (data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE type_json_src (data String) ENGINE = MergeTree ORDER BY tuple(); + +SYSTEM STOP MERGES type_json_src; + +SET max_threads = 1; +SET max_insert_threads = 1; +SET output_format_json_named_tuples_as_objects = 1; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +TRUNCATE TABLE type_json_src; +TRUNCATE TABLE type_json_dst; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; diff --git a/tests/queries/0_stateless/01825_new_type_json_missed_values.reference b/tests/queries/0_stateless/01825_new_type_json_missed_values.reference new file mode 100644 index 00000000000..952b5652bc1 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_missed_values.reference @@ -0,0 +1,4 @@ +('foo','Int64') +('k1','Int64') +('k2','Int64') +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_missed_values.sql b/tests/queries/0_stateless/01825_new_type_json_missed_values.sql new file mode 100644 index 00000000000..84bd8a19c18 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_missed_values.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) 
+ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES t_json; + +INSERT INTO t_json SELECT number, '{"k1": 1, "k2": 2}' FROM numbers(1000000); +INSERT INTO t_json VALUES (1000001, '{"foo": 1}'); + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) AS path FROM t_json ORDER BY path; +SELECT count() FROM t_json WHERE obj.foo IS NOT NULL; + +DROP TABLE IF EXISTS t_json; diff --git a/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference b/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference new file mode 100644 index 00000000000..63c12792c17 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_multiple_files.reference @@ -0,0 +1,22 @@ +{"data":{"k0":"100"}} +{"data":{"k1":"100"}} +{"data":{"k2":"100"}} +{"data":{"k3":"100"}} +{"data":{"k4":"100"}} +{"data":{"k5":"100"}} +('k0','Int64') +('k1','Int64') +('k2','Int64') +('k3','Int64') +('k4','Int64') +('k5','Int64') +{"data":{"k0":"100"}} +{"data":{"k1":"100"}} +{"data":{"k2":"100"}} +('k0','Int64') +('k1','Int64') +('k2','Int64') +{"data":{"k1":"100"}} +{"data":{"k3":"100"}} +('k1','Int64') +('k3','Int64') diff --git a/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh b/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh new file mode 100755 index 00000000000..9cb37987628 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_multiple_files.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +for f in "${USER_FILES_PATH:?}/${CLICKHOUSE_DATABASE}"_*.json; do + [ -e $f ] && rm $f +done + +for i in {0..5}; do + echo "{\"k$i\": 100}" > "$USER_FILES_PATH/${CLICKHOUSE_DATABASE}_$i.json" +done + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_files" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_files (file String, data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON')" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE IF EXISTS t_json_files" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files \ + SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON') \ + ORDER BY _file LIMIT 3" --max_threads 1 --min_insert_block_size_rows 1 --max_insert_block_size 1 --max_block_size 1 --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file, data FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "TRUNCATE TABLE IF EXISTS t_json_files" + +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_files \ + SELECT _file, data FROM file('${CLICKHOUSE_DATABASE}_*.json', 'JSONAsObject', 'data JSON') \ + WHERE _file IN ('${CLICKHOUSE_DATABASE}_1.json', '${CLICKHOUSE_DATABASE}_3.json')" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT data FROM t_json_files ORDER BY file FORMAT JSONEachRow" 
--output_format_json_named_tuples_as_objects 1 +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_files ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_files" +rm "$USER_FILES_PATH"/${CLICKHOUSE_DATABASE}_*.json diff --git a/tests/queries/0_stateless/01825_new_type_json_mutations.reference b/tests/queries/0_stateless/01825_new_type_json_mutations.reference new file mode 100644 index 00000000000..c7523661a3b --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_mutations.reference @@ -0,0 +1,7 @@ +1 q {"k1":"1","k2":"2","k3":[{"k4":"aaa"},{"k4":"bbb"}]} +2 w {"k1":"3","k2":"4","k3":[{"k4":"ccc"}]} +3 e {"k1":"5","k2":"6"} +1 q {"k1":"1","k2":"2","k3":[{"k4":"aaa"},{"k4":"bbb"}]} +3 e {"k1":"5","k2":"6"} +1 foo +3 foo diff --git a/tests/queries/0_stateless/01825_new_type_json_mutations.sql b/tests/queries/0_stateless/01825_new_type_json_mutations.sql new file mode 100644 index 00000000000..77feee692d9 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_mutations.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_mutations; + +SET allow_experimental_json_type = 1; +SET output_format_json_named_tuples_as_objects = 1; +SET mutations_sync = 2; + +CREATE TABLE t_json_mutations(id UInt32, s String, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_mutations VALUES (1, 'q', '{"k1": 1, "k2": 2, "k3": [{"k4": "aaa"}, {"k4": "bbb"}]}'); +INSERT INTO t_json_mutations VALUES (2, 'w', '{"k1": 3, "k2": 4, "k3": [{"k4": "ccc"}]}'); +INSERT INTO t_json_mutations VALUES (3, 'e', '{"k1": 5, "k2": 6}'); + +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DELETE WHERE id = 2; +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DROP COLUMN s, DROP COLUMN obj, ADD COLUMN t String DEFAULT 'foo'; +SELECT * FROM t_json_mutations ORDER BY id; + +DROP TABLE t_json_mutations; diff --git a/tests/queries/0_stateless/01825_new_type_json_nbagames.reference b/tests/queries/0_stateless/01825_new_type_json_nbagames.reference new file mode 100644 index 00000000000..9be03136b68 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_nbagames.reference @@ -0,0 +1,61 @@ +1000 +('_id.$oid','String') +('date.$date','String') +('teams','Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))') +('abbreviation','String') +('city','String') +('home','Bool') +('name','String') +('players','Array(JSON(max_dynamic_types=8, max_dynamic_paths=64))') +('results.ast','Int64') +('results.blk','Int64') +('results.drb','Int64') +('results.fg','Int64') +('results.fg3','Int64') +('results.fg3_pct','String') +('results.fg3a','Int64') +('results.fg_pct','String') +('results.fga','Int64') +('results.ft','Int64') +('results.ft_pct','String') +('results.fta','Int64') +('results.mp','Int64') +('results.orb','Int64') +('results.pf','Int64') +('results.pts','Int64') +('results.stl','Int64') +('results.tov','Int64') +('results.trb','Int64') +('score','Int64') +('won','Int64') +Boston Celtics 70 +Los Angeles Lakers 64 +Milwaukee Bucks 61 +Philadelphia 76ers 57 +Atlanta Hawks 55 +('ast','Int64') +('blk','Int64') +('drb','Int64') +('fg','Int64') +('fg3','Int64') +('fg3_pct','String') +('fg3a','Int64') +('fg_pct','String') +('fga','Int64') +('ft','Int64') +('ft_pct','String') +('fta','Int64') +('mp','String') +('orb','Int64') +('pf','Int64') +('player','String') +('pts','Int64') +('stl','Int64') +('tov','Int64') +('trb','Int64') +Larry Bird 10 +Clyde Drexler 4 +Alvin 
Robertson 3 +Magic Johnson 3 +Charles Barkley 2 +1 diff --git a/tests/queries/0_stateless/01825_new_type_json_nbagames.sh b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh new file mode 100755 index 00000000000..20eba88eda4 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_nbagames.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_from_string" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/nbagames_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames FORMAT JSONAsObject" + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM nbagames WHERE NOT ignore(*)" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) as path from nbagames order by path" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.teams[]))) as path from nbagames order by path" + +${CLICKHOUSE_CLIENT} --allow_experimental_analyzer=1 -q \ + "SELECT teams.name.:String AS name, sum(teams.won.:Int64) AS wins FROM nbagames \ + ARRAY JOIN data.teams[] AS teams GROUP BY name \ + ORDER BY wins DESC LIMIT 5;" + +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(data.teams[].players[])))) as path from nbagames order by path" + +${CLICKHOUSE_CLIENT} --allow_experimental_analyzer=1 -q \ +"SELECT player, sum(triple_double) AS triple_doubles FROM \ +( \ + SELECT \ + arrayJoin(arrayJoin(data.teams[].players[])) as players, \ + players.player.:String as player, \ + ((players.pts.:Int64 >= 10) + \ + (players.ast.:Int64 >= 10) + \ + (players.blk.:Int64 >= 10) + \ + (players.stl.:Int64 >= 10) + \ + (players.trb.:Int64 >= 10)) >= 3 AS triple_double \ + from nbagames \ +) \ +GROUP BY player ORDER BY triple_doubles DESC, player LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames_string (data String) ENGINE = MergeTree ORDER BY tuple()" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE nbagames_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_json_type 1 + +cat $CUR_DIR/data_json/nbagames_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames_string FORMAT JSONAsString" +${CLICKHOUSE_CLIENT} -q "INSERT INTO nbagames_from_string SELECT data FROM nbagames_string" + +${CLICKHOUSE_CLIENT} -q "SELECT \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM nbagames_from_string) = \ + (SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), sum(cityHash64(toString(data))) FROM nbagames)" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_string" +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS nbagames_from_string" diff --git a/tests/queries/0_stateless/01825_new_type_json_order_by.reference b/tests/queries/0_stateless/01825_new_type_json_order_by.reference new file mode 100644 index 00000000000..611d2835127 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_order_by.reference @@ -0,0 +1,6 @@ +0 +0 +{"k":"v"} + +{"k":"v"} +{"k":"v"} diff --git a/tests/queries/0_stateless/01825_new_type_json_order_by.sql 
b/tests/queries/0_stateless/01825_new_type_json_order_by.sql new file mode 100644 index 00000000000..6b5fb40aed4 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_order_by.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SET allow_experimental_json_type = 1; +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON); +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON), dummy; +SELECT materialize('{"k":"v"}'::JSON) SETTINGS extremes = 1; diff --git a/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference new file mode 100644 index 00000000000..7cf3855d684 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.reference @@ -0,0 +1 @@ +{'k1':['Int64'],'k2':['String']} 500000 diff --git a/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql new file mode 100644 index 00000000000..a8457ff4f15 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_parallel_insert.sql @@ -0,0 +1,10 @@ +-- Tags: long +DROP TABLE IF EXISTS t_json_parallel; + +SET allow_experimental_json_type = 1, max_insert_threads = 20, max_threads = 20, min_insert_block_size_rows = 65536; +CREATE TABLE t_json_parallel (data JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_parallel SELECT materialize('{"k1":1, "k2": "some"}') FROM numbers_mt(500000); +SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), count() FROM t_json_parallel; + +DROP TABLE t_json_parallel; diff --git a/tests/queries/0_stateless/01825_new_type_json_partitions.reference b/tests/queries/0_stateless/01825_new_type_json_partitions.reference new file mode 100644 index 00000000000..c5839472132 --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_partitions.reference @@ -0,0 +1,2 @@ +{"id":1,"obj":{"k1":"v1"}} +{"id":2,"obj":{"k2":"v2"}} diff --git a/tests/queries/0_stateless/01825_new_type_json_partitions.sql b/tests/queries/0_stateless/01825_new_type_json_partitions.sql new file mode 100644 index 00000000000..d1f37dedded --- /dev/null +++ b/tests/queries/0_stateless/01825_new_type_json_partitions.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_partitions; + +SET allow_experimental_json_type = 1; + +CREATE TABLE t_json_partitions (id UInt32, obj JSON) +ENGINE MergeTree ORDER BY id PARTITION BY id; + +INSERT INTO t_json_partitions FORMAT JSONEachRow {"id": 1, "obj": {"k1": "v1"}} {"id": 2, "obj": {"k2": "v2"}}; + +SELECT * FROM t_json_partitions ORDER BY id FORMAT JSONEachRow; + +DROP TABLE t_json_partitions; diff --git a/tests/queries/0_stateless/01825_type_json_10.sql b/tests/queries/0_stateless/01825_type_json_10.sql index e13026770f6..3ddbf85ba63 100644 --- a/tests/queries/0_stateless/01825_type_json_10.sql +++ b/tests/queries/0_stateless/01825_type_json_10.sql @@ -4,7 +4,7 @@ SET allow_experimental_object_type = 1; SET output_format_json_named_tuples_as_objects = 1; DROP TABLE IF EXISTS t_json_10; -CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; +CREATE TABLE t_json_10 (o Object('json')) ENGINE = Memory; INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} diff --git a/tests/queries/0_stateless/01825_type_json_11.sh b/tests/queries/0_stateless/01825_type_json_11.sh index dbed15c8bb9..6109dff53a6 100755 --- a/tests/queries/0_stateless/01825_type_json_11.sh +++ 
b/tests/queries/0_stateless/01825_type_json_11.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_11" -$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1 +$CLICKHOUSE_CLIENT -q "CREATE TABLE t_json_11 (obj Object('json')) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1 cat < range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client=1 -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client=0 -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5K' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='5k' -n -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5K' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_result_bytes 0 --max_memory_usage_in_client='5k' -q "SELECT arrayMap(x -> range(x), range(number)) FROM numbers(1000) -- { clientError MEMORY_LIMIT_EXCEEDED }" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1M' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='23G' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -n -q "SELECT * FROM (SELECT * FROM system.numbers 
LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" -$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -n -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2P' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='2.1p' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10E' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='10.2e' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1.1T' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='-1' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_NUMBER" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='1m' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='14g' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" +$CLICKHOUSE_CLIENT --max_memory_usage_in_client='11t' -q "SELECT * FROM (SELECT * FROM system.numbers LIMIT 600000) as num WHERE num.number=60000" 2>&1 | grep -c -F "CANNOT_PARSE_INPUT_ASSERTION_FAILED" diff --git a/tests/queries/0_stateless/02021_create_database_with_comment.sh b/tests/queries/0_stateless/02021_create_database_with_comment.sh index f77397dc482..d87b0794c91 100755 --- a/tests/queries/0_stateless/02021_create_database_with_comment.sh +++ b/tests/queries/0_stateless/02021_create_database_with_comment.sh @@ -20,7 +20,7 @@ function test_db_comments() local ENGINE_NAME="$1" echo "engine : ${ENGINE_NAME}" - $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -nm <& /dev/null +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' >& /dev/null echo $? 
echo 'regression test for overlap profile events snapshots between queries' -$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'regression test for overlap profile events snapshots between queries (clickhouse-local)' -$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -n -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' +$CLICKHOUSE_LOCAL --print-profile-events --profile-events-delay-ms=-1 -q 'select 1; select 1' |& grep -F -o '[ 0 ] SelectedRows: 1 (increment)' echo 'print everything' profile_events="$( @@ -35,5 +35,5 @@ profile_events="$( test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)" echo 'check that ProfileEvents is new for each query' -sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -n -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') +sleep_function_calls=$($CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -q 'select sleep(1); select 1' |& grep -c 'SleepFunctionCalls') test "$sleep_function_calls" -eq 1 && echo OK || echo "FAIL ($sleep_function_calls)" diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference index a3bac432482..deabef61a88 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.reference @@ -5,9 +5,9 @@ insert into utf8_overlap values ('\xe2'), ('Foo⚊BarBazBam'), ('\xe2'), ('Foo -- MONOGRAM FOR YANG with lowerUTF8(str) as l_, upperUTF8(str) as u_, '0x' || hex(str) as h_ select length(str), if(l_ == '\xe2', h_, l_), if(u_ == '\xe2', h_, u_) from utf8_overlap format CSV; -1,"0xE2","0xE2" +1,"�","�" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -1,"0xE2","0xE2" +1,"�","�" 15,"foo⚊barbazbam","FOO⚊BARBAZBAM" -- NOTE: regression test for introduced bug -- https://github.com/ClickHouse/ClickHouse/issues/42756 diff --git a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql index 8ca0a3f5f75..d175e0659d0 100644 --- a/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql +++ b/tests/queries/0_stateless/02071_lower_upper_utf8_row_overlaps.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + drop table if exists utf8_overlap; create table utf8_overlap (str String) engine=Memory(); diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 32e8b2f4312..638a46a142f 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -510,9 +510,15 @@ CREATE TABLE system.parts `rows_where_ttl_info.max` Array(DateTime), `projections` Array(String), `visible` UInt8, - `creation_tid` Tuple(UInt64, UInt64, UUID), + `creation_tid` Tuple( + UInt64, + UInt64, + UUID), `removal_tid_lock` UInt64, - `removal_tid` Tuple(UInt64, UInt64, UUID), + `removal_tid` Tuple( + UInt64, + UInt64, + UUID), `creation_csn` UInt64, `removal_csn` UInt64, `has_lightweight_delete` UInt8, diff --git 
a/tests/queries/0_stateless/02122_join_group_by_timeout.sh b/tests/queries/0_stateless/02122_join_group_by_timeout.sh index b4644878544..0e89fcf56d9 100755 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.sh +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.sh @@ -15,8 +15,9 @@ fi # TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) +### However, in the test, the server can be overloaded, so we assert query duration in the interval of 1 to 60 seconds. query_id=$(random_str 12) -$CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " +$CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --query_id "$query_id" --max_execution_time 1 -q " SELECT * FROM ( SELECT a.name as n @@ -33,12 +34,12 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " FORMAT Null " 2>&1 | grep -m1 -o "Code: 159" $CLICKHOUSE_CLIENT -q "system flush logs" -${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) BETWEEN 1 AND 60 from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" ### Should stop pulling data and return what has been generated already (return code 0) query_id=$(random_str 12) -$CLICKHOUSE_CLIENT --query_id "$query_id" -q " +$CLICKHOUSE_CLIENT --max_result_rows 0 --max_result_bytes 0 --query_id "$query_id" -q " SELECT a.name as n FROM ( @@ -52,12 +53,12 @@ $CLICKHOUSE_CLIENT --query_id "$query_id" -q " " echo $? 
$CLICKHOUSE_CLIENT -q "system flush logs" -${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) BETWEEN 1 AND 60 from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" # HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d " +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1&max_result_rows=0&max_result_bytes=0" -d " SELECT * FROM ( SELECT a.name as n @@ -76,7 +77,7 @@ ${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_tim ### Should stop pulling data and return what has been generated already (return code 0) -${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL" -d " +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_result_rows=0&max_result_bytes=0" -d " SELECT a.name as n FROM ( diff --git a/tests/queries/0_stateless/02136_kill_scalar_queries.sh b/tests/queries/0_stateless/02136_kill_scalar_queries.sh index c8691b62360..f8bd5a42756 100755 --- a/tests/queries/0_stateless/02136_kill_scalar_queries.sh +++ b/tests/queries/0_stateless/02136_kill_scalar_queries.sh @@ -10,7 +10,7 @@ function wait_for_query_to_start() } QUERY_1_ID="${CLICKHOUSE_DATABASE}_TEST02132KILL_QUERY1" -(${CLICKHOUSE_CLIENT} --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." || echo 'FAIL') & +(${CLICKHOUSE_CLIENT} --max_rows_to_read 0 --query_id="${QUERY_1_ID}" --query='select (SELECT max(number) from system.numbers) + 1;' 2>&1 | grep -q "Code: 394." 
|| echo 'FAIL') & wait_for_query_to_start "${QUERY_1_ID}" ${CLICKHOUSE_CLIENT} --query="KILL QUERY WHERE query_id='${QUERY_1_ID}' SYNC" diff --git a/tests/queries/0_stateless/02136_scalar_progress.reference b/tests/queries/0_stateless/02136_scalar_progress.reference index 5378c52de89..b8957f78e6d 100644 --- a/tests/queries/0_stateless/02136_scalar_progress.reference +++ b/tests/queries/0_stateless/02136_scalar_progress.reference @@ -1,6 +1,7 @@ < X-ClickHouse-Progress: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} < X-ClickHouse-Progress: {"read_rows":"65505","read_bytes":"524040","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} < X-ClickHouse-Progress: {"read_rows":"100000","read_bytes":"800000","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} -< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"0","result_bytes":"0"} -< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"1","result_bytes":"272"} -< X-ClickHouse-Summary: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100000","result_rows":"1","result_bytes":"272"} +< X-ClickHouse-Progress: {"read_rows":"100000","read_bytes":"800000","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"0","result_bytes":"0"} +< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"0","result_bytes":"0"} +< X-ClickHouse-Progress: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"1","result_bytes":"272"} +< X-ClickHouse-Summary: {"read_rows":"100001","read_bytes":"800001","written_rows":"0","written_bytes":"0","total_rows_to_read":"100001","result_rows":"1","result_bytes":"272"} diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0bb8966cbe4..0e74c0a083e 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 934d87616ac..3a95e59416a 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' diff --git a/tests/queries/0_stateless/02151_lc_prefetch.sql b/tests/queries/0_stateless/02151_lc_prefetch.sql index c2b97231145..f8c76038120 100644 --- a/tests/queries/0_stateless/02151_lc_prefetch.sql +++ b/tests/queries/0_stateless/02151_lc_prefetch.sql @@ -3,5 +3,6 @@ drop table if exists tab_lc; CREATE TABLE tab_lc (x UInt64, y LowCardinality(String)) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; insert into tab_lc select number, toString(number % 10) from numbers(20000000); optimize table tab_lc; +SET max_rows_to_read = '21M'; select count() from tab_lc where y == '0' settings local_filesystem_read_prefetch=1; drop table if exists tab_lc; diff --git a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql index cf400ed34c5..d7ce133f38c 100644 --- a/tests/queries/0_stateless/02161_addressToLineWithInlines.sql +++ b/tests/queries/0_stateless/02161_addressToLineWithInlines.sql @@ -6,7 +6,7 @@ SELECT addressToLineWithInlines(1); -- { serverError FUNCTION_NOT_ALLOWED } SET allow_introspection_functions = 1; SET query_profiler_real_time_period_ns = 0; SET query_profiler_cpu_time_period_ns = 1000000; -SET log_queries = 1; +SET log_queries = 1, max_rows_to_read = 0; SELECT count() FROM numbers_mt(10000000000) SETTINGS log_comment='02161_test_case'; SET log_queries = 0; SET query_profiler_cpu_time_period_ns = 0; diff --git a/tests/queries/0_stateless/02177_issue_31009.sql b/tests/queries/0_stateless/02177_issue_31009.sql index f25df59f4b4..5c62b5a9c2f 100644 --- a/tests/queries/0_stateless/02177_issue_31009.sql +++ b/tests/queries/0_stateless/02177_issue_31009.sql @@ -8,6 +8,8 @@ DROP TABLE IF EXISTS right; CREATE TABLE left ( key UInt32, value String ) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; CREATE TABLE right ( key UInt32, value String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +SET max_rows_to_read = '50M'; + INSERT INTO left SELECT number, toString(number) FROM numbers(25367182); INSERT INTO right SELECT number, toString(number) FROM numbers(23124707); diff --git a/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 b/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 index 47940356302..7df77595347 100644 --- a/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 +++ b/tests/queries/0_stateless/02177_issue_31009_pt2.sql.j2 @@ -1,4 +1,5 @@ --- Tags: long +-- Tags: long, no-flaky-check +-- It can be too long with ThreadFuzzer DROP TABLE IF EXISTS left; DROP TABLE IF EXISTS right; diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference 
b/tests/queries/0_stateless/02210_processors_profile_log.reference index 035bd9897ad..12ba17103da 100644 --- a/tests/queries/0_stateless/02210_processors_profile_log.reference +++ b/tests/queries/0_stateless/02210_processors_profile_log.reference @@ -6,6 +6,6 @@ ExpressionTransform ExpressionTransform 1 1 1 1 1 LazyOutputFormat 1 1 1 0 0 LimitsCheckingTransform 1 1 1 1 1 -NullSource 1 0 0 0 0 -NullSource 1 0 0 0 0 +NullSource 0 0 0 0 0 +NullSource 0 0 0 0 0 SourceFromSingleChunk 1 0 0 1 1 diff --git a/tests/queries/0_stateless/02210_processors_profile_log.sql b/tests/queries/0_stateless/02210_processors_profile_log.sql index 75e5bcbb585..a850f4312b3 100644 --- a/tests/queries/0_stateless/02210_processors_profile_log.sql +++ b/tests/queries/0_stateless/02210_processors_profile_log.sql @@ -20,8 +20,8 @@ SELECT -- SourceFromSingleChunk, that feed data to ExpressionTransform, -- will feed first block and then wait in PortFull. name = 'SourceFromSingleChunk', output_wait_elapsed_us >= 0.9e6 ? 1 : output_wait_elapsed_us, - -- NullSource/LazyOutputFormatLazyOutputFormat are the outputs - -- so they cannot starts to execute before sleep(1) will be executed. + -- LazyOutputFormat is the output + -- so it cannot start executing before sleep(1) has been executed. input_wait_elapsed_us>=1e6 ? 1 : input_wait_elapsed_us) elapsed, input_rows, diff --git a/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference b/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference index 1e3d3a50562..e3978020431 100644 --- a/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference +++ b/tests/queries/0_stateless/02210_toColumnTypeName_toLowCardinality_const.reference @@ -1 +1 @@ -Const(ColumnLowCardinality) +Const(LowCardinality(UInt8)) diff --git a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh index 3c44a2a7ba7..a382b3859f3 100755 --- a/tests/queries/0_stateless/02221_parallel_replicas_bug.sh +++ b/tests/queries/0_stateless/02221_parallel_replicas_bug.sh @@ -4,4 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -nm < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null +${CLICKHOUSE_CLIENT} --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 -m < "$CURDIR"/01099_parallel_distributed_insert_select.sql > /dev/null diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh index db94c59d2de..e23a272a4e8 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2" -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table ( key UInt64 ) @@ -16,7 +16,7 @@ ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_ ORDER BY tuple(); " -${CLICKHOUSE_CLIENT} -n -q" +${CLICKHOUSE_CLIENT} -q" CREATE TABLE sample_table_2 ( key UInt64 ) diff --git a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh index c62ec14b340..6381d811d5d 100755 --- a/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh +++ b/tests/queries/0_stateless/02221_system_zookeeper_unrestricted_like.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS sample_table_2;" -${CLICKHOUSE_CLIENT} -n --query="CREATE TABLE sample_table ( +${CLICKHOUSE_CLIENT} --query="CREATE TABLE sample_table ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like', '1') @@ -16,7 +16,7 @@ ORDER BY tuple(); DROP TABLE IF EXISTS sample_table SYNC;" -${CLICKHOUSE_CLIENT} -n --query "CREATE TABLE sample_table_2 ( +${CLICKHOUSE_CLIENT} --query "CREATE TABLE sample_table_2 ( key UInt64 ) ENGINE ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02221_system_zookeeper_unrestricted_like_2', '1') diff --git a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh index 376a49fd820..63111cc32e4 100755 --- a/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh +++ b/tests/queries/0_stateless/02225_parallel_distributed_insert_select_view.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists dst_02225; drop table if exists src_02225; create table dst_02225 (key Int) engine=Memory(); @@ -14,7 +14,7 @@ create table src_02225 (key Int) engine=Memory(); insert into src_02225 values (1); " -$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -nm -q " +$CLICKHOUSE_CLIENT --param_database=$CLICKHOUSE_DATABASE -m -q " truncate table dst_02225; insert into function remote('127.{1,2}', currentDatabase(), dst_02225, key) select * from remote('127.{1,2}', view(select * from {database:Identifier}.src_02225), key) @@ -29,7 +29,7 @@ settings parallel_distributed_insert_select=2, max_distributed_depth=1; select * from dst_02225; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table src_02225; drop table dst_02225; " diff --git a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql index b23e5640b8f..0c150249aeb 100644 --- a/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql +++ b/tests/queries/0_stateless/02226_analyzer_or_like_combine.sql @@ -1,3 +1,5 @@ +SET allow_hyperscan = 1, max_hyperscan_regexp_length = 0, max_hyperscan_regexp_total_length = 0; + EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0; EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; diff --git a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh index bc90f4b2c11..177b373641f 100755 --- a/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh +++ b/tests/queries/0_stateless/02226_parallel_reading_from_replicas_benchmark.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_02226; create table data_02226 (key Int) engine=MergeTree() order by key as select * from numbers(1); @@ -24,7 +24,7 @@ opts=( $CLICKHOUSE_BENCHMARK --query "select * from remote('127.1', $CLICKHOUSE_DATABASE, data_02226)" "${opts[@]}" >& /dev/null ret=$? 
-$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table data_02226; " diff --git a/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference b/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference index be82d744a3b..56293ca0e5d 100644 --- a/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference +++ b/tests/queries/0_stateless/02228_unquoted_dates_in_csv_schema_inference.reference @@ -1 +1 @@ -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) diff --git a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh index d1a3825d286..e47a3033681 100755 --- a/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh +++ b/tests/queries/0_stateless/02232_allow_only_replicated_engine.sh @@ -12,9 +12,9 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON Memory, TABLE ENGINE ON MergeTree, TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_memory (x UInt32) engine = Memory;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" 2>&1 | grep -o "Only tables with a Replicated engine" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_mt (x UInt32) engine = MergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt (x UInt32) engine = ReplicatedMergeTree order by x;" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index fa9c6bd0f94..b9f0a49ec4d 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -37,7 +37,7 @@ IPv6 functions ::ffff:127.0.0.1 :: \N -100000000 +20000000 -- ::ffff:127.0.0.1 -- diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 28f1afff57f..c851cfde927 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -67,11 +67,11 @@ 
SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6} SELECT toIPv6OrDefault('::.1.2.3'); SELECT toIPv6OrNull('::.1.2.3'); -SELECT count() FROM numbers_mt(100000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); +SELECT count() FROM numbers_mt(20000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); SELECT '--'; -SELECT cast('test' , 'IPv6'); --{serverError CANNOT_PARSE_IPV6} +SELECT cast('test' , 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 } SELECT cast('::ffff:127.0.0.1', 'IPv6'); SELECT '--'; diff --git a/tests/queries/0_stateless/02242_subcolumns_sizes.sql b/tests/queries/0_stateless/02242_subcolumns_sizes.sql index d29241131d3..1232e5fc1c2 100644 --- a/tests/queries/0_stateless/02242_subcolumns_sizes.sql +++ b/tests/queries/0_stateless/02242_subcolumns_sizes.sql @@ -4,7 +4,7 @@ DROP TABLE IF EXISTS t_subcolumns_sizes; SET allow_experimental_object_type = 1; -CREATE TABLE t_subcolumns_sizes (id UInt64, arr Array(UInt64), n Nullable(String), d JSON) +CREATE TABLE t_subcolumns_sizes (id UInt64, arr Array(UInt64), n Nullable(String), d Object('json')) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0; diff --git a/tests/queries/0_stateless/02246_flatten_tuple.sql b/tests/queries/0_stateless/02246_flatten_tuple.sql index 139f7a621ef..d09e740ee0c 100644 --- a/tests/queries/0_stateless/02246_flatten_tuple.sql +++ b/tests/queries/0_stateless/02246_flatten_tuple.sql @@ -12,7 +12,7 @@ INSERT INTO t_flatten_tuple VALUES (([(1, 'a'), (2, 'b')], 3, ('c', 4))); SELECT flattenTuple(t) AS ft, toTypeName(ft) FROM t_flatten_tuple; SET allow_experimental_object_type = 1; -CREATE TABLE t_flatten_object(data JSON) ENGINE = Memory; +CREATE TABLE t_flatten_object(data Object('json')) ENGINE = Memory; INSERT INTO t_flatten_object VALUES ('{"id": 1, "obj": {"k1": 1, "k2": {"k3": 2, "k4": [{"k5": 3}, {"k5": 4}]}}, "s": "foo"}'); INSERT INTO t_flatten_object VALUES ('{"id": 2, "obj": {"k2": {"k3": "str", "k4": [{"k6": 55}]}, "some": 42}, "s": "bar"}'); diff --git a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh index 66417e9694a..09f9c0c8a98 100755 --- a/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh +++ b/tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function cleanup() { - $CLICKHOUSE_CLIENT -nmq " + $CLICKHOUSE_CLIENT -mq " DROP USER IF EXISTS with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP USER IF EXISTS without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; DROP DATABASE IF EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; @@ -15,7 +15,7 @@ function cleanup() cleanup trap cleanup EXIT -$CLICKHOUSE_CLIENT -nmq " +$CLICKHOUSE_CLIENT -mq " CREATE USER with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; CREATE USER without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME; diff --git a/tests/queries/0_stateless/02262_column_ttl.sh b/tests/queries/0_stateless/02262_column_ttl.sh index b5e29c9b2a1..c620d3b6d9c 100755 --- a/tests/queries/0_stateless/02262_column_ttl.sh +++ b/tests/queries/0_stateless/02262_column_ttl.sh @@ -14,7 +14,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # note, that this should be written in .sh since we need $CLICKHOUSE_DATABASE # not 'default' to catch text_log -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists ttl_02262; drop table if exists this_text_log; @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -nm -q " ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = 
'$CLICKHOUSE_DATABASE' and name = 'ttl_02262'") -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " -- OPTIMIZE TABLE x FINAL will be done in background -- attach to it's log, via table UUID in query_id (see merger/mutator code). create materialized view this_text_log engine=Memory() as diff --git a/tests/queries/0_stateless/02286_parallel_final.sh b/tests/queries/0_stateless/02286_parallel_final.sh index 0ac510208f3..47dfad42e11 100755 --- a/tests/queries/0_stateless/02286_parallel_final.sh +++ b/tests/queries/0_stateless/02286_parallel_final.sh @@ -9,7 +9,7 @@ echo "Test intersecting ranges" test_random_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_rnd; create table tbl_8parts_${layers}granules_rnd (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 % 8); insert into tbl_8parts_${layers}granules_rnd select number, 1 from numbers_mt($((layers * 8 * 8192))); @@ -29,7 +29,7 @@ echo "Test non intersecting ranges" test_sequential_values() { layers=$1 - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " drop table if exists tbl_8parts_${layers}granules_seq; create table tbl_8parts_${layers}granules_seq (key1 UInt32, sign Int8) engine = CollapsingMergeTree(sign) order by (key1) partition by (key1 / $((layers * 8192)))::UInt64; insert into tbl_8parts_${layers}granules_seq select number, 1 from numbers_mt($((layers * 8 * 8192))); diff --git a/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference b/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference index 21348493d1d..916cdaf83cd 100644 --- a/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference +++ b/tests/queries/0_stateless/02286_tuple_numeric_identifier.reference @@ -1,4 +1,4 @@ -CREATE TABLE default.t_tuple_numeric\n(\n `t` Tuple(`1` Tuple(`2` Int32, `3` Int32), `4` Int32)\n)\nENGINE = Memory +CREATE TABLE default.t_tuple_numeric\n(\n `t` Tuple(\n `1` Tuple(\n `2` Int32,\n `3` Int32),\n `4` Int32)\n)\nENGINE = Memory {"t":{"1":{"2":2,"3":3},"4":4}} 2 3 4 2 3 4 diff --git a/tests/queries/0_stateless/02286_tuple_numeric_identifier.sql b/tests/queries/0_stateless/02286_tuple_numeric_identifier.sql index 151ff275f7b..8c26b93aedd 100644 --- a/tests/queries/0_stateless/02286_tuple_numeric_identifier.sql +++ b/tests/queries/0_stateless/02286_tuple_numeric_identifier.sql @@ -28,7 +28,7 @@ SELECT `t`.`1`.`1`, `t`.`1`.`2`, `t`.`2` FROM t_tuple_numeric; DROP TABLE t_tuple_numeric; SET allow_experimental_object_type = 1; -CREATE TABLE t_tuple_numeric (t JSON) ENGINE = Memory; +CREATE TABLE t_tuple_numeric (t Object('json')) ENGINE = Memory; INSERT INTO t_tuple_numeric FORMAT JSONEachRow {"t":{"1":{"2":2,"3":3},"4":4}} SELECT toTypeName(t) FROM t_tuple_numeric LIMIT 1; diff --git a/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh b/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh index a08928a773c..0d8a568fef0 100755 --- a/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh +++ b/tests/queries/0_stateless/02293_http_header_full_summary_without_progress.sh @@ -6,8 +6,17 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh +# Sanity check to ensure that the server is up and running +for _ in {1..10}; do + echo 'SELECT 1' | ${CLICKHOUSE_CURL_COMMAND} -s "${CLICKHOUSE_URL}" --data-binary @- > /dev/null + if [ $? 
-eq 0 ]; then + break + fi + sleep 1 +done + CURL_OUTPUT=$(echo 'SELECT 1 + sleepEachRow(0.00002) FROM numbers(100000)' | \ - ${CLICKHOUSE_CURL_COMMAND} -vsS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0&max_execution_time=1" --data-binary @- 2>&1) + ${CLICKHOUSE_CURL_COMMAND} --max-time 3 -vsS "${CLICKHOUSE_URL}&wait_end_of_query=1&send_progress_in_http_headers=0&max_execution_time=1" --data-binary @- 2>&1) READ_ROWS=$(echo "${CURL_OUTPUT}" | \ grep 'X-ClickHouse-Summary' | \ @@ -20,6 +29,7 @@ then echo "Read rows in summary is not zero" else echo "Read rows in summary is zero!" + echo "${CURL_OUTPUT}" fi # Check that the response code is correct too diff --git a/tests/queries/0_stateless/02293_ttest_large_samples.sql b/tests/queries/0_stateless/02293_ttest_large_samples.sql index 14baa3fddfe..b4687541360 100644 --- a/tests/queries/0_stateless/02293_ttest_large_samples.sql +++ b/tests/queries/0_stateless/02293_ttest_large_samples.sql @@ -1,3 +1,5 @@ +-- Tags: long + SELECT roundBankers(result.1, 5), roundBankers(result.2, 5) FROM ( SELECT studentTTest(sample, variant) as result @@ -15,6 +17,8 @@ SELECT FROM system.numbers limit 500000)); +SET max_rows_to_read = 0; + SELECT roundBankers(result.1, 5), roundBankers(result.2, 5 ) FROM ( SELECT studentTTest(sample, variant) as result diff --git a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh index 7a18b8fea29..27dbd3e3de6 100755 --- a/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh +++ b/tests/queries/0_stateless/02294_floating_point_second_in_settings.sh @@ -23,16 +23,16 @@ function check_output() { # TCP CLIENT echo "TCP CLIENT" -OUTPUT=$($CLICKHOUSE_CLIENT --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true) +OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 --max_execution_time $MAX_TIMEOUT -q "SELECT count() FROM system.numbers" 2>&1 || true) check_output "${OUTPUT}" echo "TCP CLIENT WITH SETTINGS IN QUERY" -OUTPUT=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true) +OUTPUT=$($CLICKHOUSE_CLIENT --max_rows_to_read 0 -q "SELECT count() FROM system.numbers SETTINGS max_execution_time=$MAX_TIMEOUT" 2>&1 || true) check_output "${OUTPUT}" # HTTP CLIENT echo "HTTP CLIENT" -OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=$MAX_TIMEOUT" -d \ +OUTPUT=$(${CLICKHOUSE_CURL_COMMAND} -q -sS "$CLICKHOUSE_URL&max_execution_time=${MAX_TIMEOUT}&max_rows_to_read=0" -d \ "SELECT count() FROM system.numbers" || true) check_output "${OUTPUT}" diff --git a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference index fa7f1799c31..6b1e4743867 100644 --- a/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference +++ b/tests/queries/0_stateless/02313_dump_column_structure_low_cardinality.reference @@ -1 +1 @@ -Array(LowCardinality(String)), Const(size = 1, Array(size = 1, UInt64(size = 1), ColumnLowCardinality(size = 2, UInt8(size = 2), ColumnUnique(size = 3, String(size = 3))))) +Array(LowCardinality(String)), Const(size = 1, Array(size = 1, UInt64(size = 1), LowCardinality(size = 2, UInt8(size = 2), Unique(size = 3, String(size = 3))))) diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh 
b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh index bd7e6be3987..953485c3a1f 100755 --- a/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh +++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization_explain.sh @@ -23,99 +23,99 @@ $CLICKHOUSE_CLIENT -q "insert into distinct_in_order_explain select number % num $CLICKHOUSE_CLIENT -q "select '-- disable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct all primary key columns -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- enable optimize_distinct_in_order'" $CLICKHOUSE_CLIENT -q "select '-- distinct with all primary key columns -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct * from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by the same columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a, b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by columns are prefix of distinct columns -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by column in distinct but non-primary key prefix -> pre-distinct and final distinct optimization'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b, c from distinct_in_order_explain order by c" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with primary key prefix and order by 
column _not_ in distinct -> pre-distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column in distinct -> final distinct optimization only'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by b" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by column _not_ in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, c from distinct_in_order_explain order by a" | eval $FIND_DISTINCT $CLICKHOUSE_CLIENT -q "select '-- distinct with non-primary key prefix and order by _const_ column in distinct -> ordinary distinct'" -$CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT +$CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b, 1 as x from distinct_in_order_explain order by x" | eval $FIND_DISTINCT echo "-- Check reading in order for distinct" echo "-- disabled, distinct columns match sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$DISABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns match sorting key" # read_in_order_two_level_merge_threshold is set here to avoid repeating MergeTreeInOrder in output -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, b from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns DON't form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq 
"$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct b from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "-- enabled, distinct columns contains constant columns, non-const columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, distinct columns contains constant columns, non-const columns match prefix of sorting key" -$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER +$CLICKHOUSE_CLIENT --read_in_order_two_level_merge_threshold=2 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct 1, b, a from distinct_in_order_explain" | eval $FIND_READING_IN_ORDER echo "-- enabled, only part of distinct columns form prefix of sorting key" -$CLICKHOUSE_CLIENT --max_threads=0 -nq "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT +$CLICKHOUSE_CLIENT --max_threads=0 -q "$ENABLE_OPTIMIZATION;explain pipeline select distinct a, c from distinct_in_order_explain" | eval $FIND_READING_DEFAULT echo "=== disable new analyzer ===" DISABLE_ANALYZER="set enable_analyzer=0" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0" | eval $FIND_SORTING_PROPERTIES echo "-- check that reading in order optimization for ORDER BY and DISTINCT applied correctly in the same query" ENABLE_READ_IN_ORDER="set optimize_read_in_order=1" echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER BY columns" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. 
it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$DISABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES echo "=== enable new analyzer ===" ENABLE_ANALYZER="set enable_analyzer=1" echo "-- enabled, check that sorting properties are propagated from ReadFromMergeTree till preliminary distinct" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;explain plan sorting=1 select distinct b, a from distinct_in_order_explain where a > 0 settings optimize_move_to_prewhere=1" | eval $FIND_SORTING_PROPERTIES echo "-- disabled, check that sorting description for ReadFromMergeTree match ORDER 
BY columns" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$DISABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization i.e. it contains columns from DISTINCT clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is overwritten by DISTINCT optimization, but direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (1), - it contains columns from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct a from distinct_in_order_explain order by a, b" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that ReadFromMergeTree sorting description is NOT overwritten by DISTINCT optimization (2), - direction used from ORDER BY clause" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;$ENABLE_READ_IN_ORDER;explain plan sorting=1 select distinct b, a from distinct_in_order_explain order by a DESC, b DESC" | eval $FIND_SORTING_PROPERTIES echo "-- enabled, check that disabling other 'read in order' optimizations do not disable distinct in order optimization" -$CLICKHOUSE_CLIENT -nq "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES +$CLICKHOUSE_CLIENT -q "$ENABLE_ANALYZER;$ENABLE_OPTIMIZATION;set optimize_read_in_order=0;set optimize_aggregation_in_order=0;set optimize_read_in_window_order=0;explain plan sorting=1 select distinct a,b from distinct_in_order_explain" | eval $FIND_SORTING_PROPERTIES $CLICKHOUSE_CLIENT -q "drop table if exists distinct_in_order_explain sync" diff --git a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql 
b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql index f82f79dbe44..6491253cd5f 100644 --- a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql +++ b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql @@ -102,7 +102,7 @@ ALTER TABLE t_proj ADD PROJECTION p_1 (SELECT avg(a), avg(b), count()) SETTINGS INSERT INTO t_proj SELECT number + 1, number + 1 FROM numbers(1000); -DELETE FROM t_proj WHERE a < 100; -- { serverError NOT_IMPLEMENTED } +DELETE FROM t_proj WHERE a < 100; -- { serverError SUPPORT_IS_DISABLED } SELECT avg(a), avg(b), count() FROM t_proj; diff --git a/tests/queries/0_stateless/02325_dates_schema_inference.reference b/tests/queries/0_stateless/02325_dates_schema_inference.reference index c8eebd3262e..124f105220d 100644 --- a/tests/queries/0_stateless/02325_dates_schema_inference.reference +++ b/tests/queries/0_stateless/02325_dates_schema_inference.reference @@ -1,29 +1,29 @@ JSONEachRow x Nullable(Date) x Nullable(DateTime64(9)) -x Nullable(DateTime64(9)) +x Nullable(DateTime) x Array(Nullable(Date)) -x Array(Nullable(DateTime64(9))) -x Array(Nullable(DateTime64(9))) -x Tuple(\n date1 Nullable(DateTime64(9)),\n date2 Nullable(Date)) -x Array(Nullable(DateTime64(9))) -x Array(Nullable(DateTime64(9))) -x Nullable(DateTime64(9)) +x Array(Nullable(DateTime)) +x Array(Nullable(DateTime)) +x Tuple(\n date1 Nullable(DateTime),\n date2 Nullable(Date)) +x Array(Nullable(DateTime)) +x Array(Nullable(DateTime)) +x Nullable(DateTime) x Array(Nullable(String)) x Nullable(String) x Array(Nullable(String)) -x Tuple(\n key1 Array(Array(Nullable(DateTime64(9)))),\n key2 Array(Array(Nullable(String)))) +x Tuple(\n key1 Array(Array(Nullable(DateTime))),\n key2 Array(Array(Nullable(String)))) CSV c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) @@ -31,14 +31,14 @@ c1 Map(String, Array(Array(Nullable(String)))) TSV c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) @@ -46,14 +46,14 @@ c1 Map(String, Array(Array(Nullable(String)))) Values c1 Nullable(Date) c1 Nullable(DateTime64(9)) -c1 Nullable(DateTime64(9)) +c1 Nullable(DateTime) c1 Array(Nullable(Date)) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Map(String, Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Array(Nullable(DateTime64(9))) -c1 Nullable(DateTime64(9)) +c1 Array(Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 Map(String, Nullable(DateTime)) +c1 Array(Nullable(DateTime)) +c1 
Array(Nullable(DateTime)) +c1 Nullable(DateTime) c1 Array(Nullable(String)) c1 Nullable(String) c1 Array(Nullable(String)) diff --git a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh index 96f80d65878..490f8361682 100755 --- a/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh +++ b/tests/queries/0_stateless/02335_column_ttl_expired_column_optimization.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) data_path="$CLICKHOUSE_TMP/local" -$CLICKHOUSE_LOCAL --path "$data_path" -nm -q " +$CLICKHOUSE_LOCAL --path "$data_path" -m -q " create table ttl_02335 ( date Date, key Int, diff --git a/tests/queries/0_stateless/02343_aggregation_pipeline.reference b/tests/queries/0_stateless/02343_aggregation_pipeline.reference index bf61eb6da0a..eb013200a17 100644 --- a/tests/queries/0_stateless/02343_aggregation_pipeline.reference +++ b/tests/queries/0_stateless/02343_aggregation_pipeline.reference @@ -1,6 +1,6 @@ -- { echoOn } -explain pipeline select * from (select * from numbers(1e8) group by number) group by number; +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0; (Expression) ExpressionTransform × 16 (Aggregating) @@ -16,7 +16,7 @@ ExpressionTransform × 16 ExpressionTransform (ReadFromSystemNumbers) NumbersRange 0 → 1 -explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0; (Expression) ExpressionTransform × 16 (Aggregating) @@ -32,7 +32,7 @@ ExpressionTransform × 16 ExpressionTransform × 16 (ReadFromSystemNumbers) NumbersRange × 16 0 → 1 -explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0; (Expression) ExpressionTransform (Sorting) diff --git a/tests/queries/0_stateless/02343_aggregation_pipeline.sql b/tests/queries/0_stateless/02343_aggregation_pipeline.sql index 0f9dbd0247d..24d54293313 100644 --- a/tests/queries/0_stateless/02343_aggregation_pipeline.sql +++ b/tests/queries/0_stateless/02343_aggregation_pipeline.sql @@ -13,11 +13,9 @@ set allow_prefetched_read_pool_for_local_filesystem = 0; -- { echoOn } -explain pipeline select * from (select * from numbers(1e8) group by number) group by number; - -explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number; - -explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number; +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0; explain pipeline select number from remote('127.0.0.{1,2,3}', system, numbers_mt) group by number settings distributed_aggregation_memory_efficient = 1; diff --git a/tests/queries/0_stateless/02344_describe_cache.reference b/tests/queries/0_stateless/02344_describe_cache.reference index 6895606eb2b..13429b14866 100644 --- a/tests/queries/0_stateless/02344_describe_cache.reference +++ 
b/tests/queries/0_stateless/02344_describe_cache.reference @@ -1,2 +1,2 @@ 1 -102400 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/02344_describe_cache_test 0 5000 0 16 +102400 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/02344_describe_cache_test 0 5000 0 16 diff --git a/tests/queries/0_stateless/02344_describe_cache.sh b/tests/queries/0_stateless/02344_describe_cache.sh index d91661db9bc..c5373b4d7e3 100755 --- a/tests/queries/0_stateless/02344_describe_cache.sh +++ b/tests/queries/0_stateless/02344_describe_cache.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT -nm --query """ DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk'); +SETTINGS disk = disk(name = '$disk_name', type = cache, max_size = '100Ki', path = '$disk_name', disk = 's3_disk', load_metadata_asynchronously = 0); """ $CLICKHOUSE_CLIENT -nm --query """ diff --git a/tests/queries/0_stateless/02344_insert_profile_events_stress.sql b/tests/queries/0_stateless/02344_insert_profile_events_stress.sql index e9a790bea5d..902e1da543c 100644 --- a/tests/queries/0_stateless/02344_insert_profile_events_stress.sql +++ b/tests/queries/0_stateless/02344_insert_profile_events_stress.sql @@ -1,4 +1,5 @@ -- Tags: no-parallel, long, no-debug, no-tsan, no-msan, no-asan +SET max_rows_to_read = 0; create table data_02344 (key Int) engine=Null; -- 3e9 rows is enough to fill the socket buffer and cause INSERT hung. diff --git a/tests/queries/0_stateless/02345_implicit_transaction.sql b/tests/queries/0_stateless/02345_implicit_transaction.sql index ee2e0a07c3e..9496de71e13 100644 --- a/tests/queries/0_stateless/02345_implicit_transaction.sql +++ b/tests/queries/0_stateless/02345_implicit_transaction.sql @@ -3,7 +3,7 @@ CREATE TABLE landing (n Int64) engine=MergeTree order by n; CREATE TABLE target (n Int64) engine=MergeTree order by n; CREATE MATERIALIZED VIEW landing_to_target TO target AS - SELECT n + throwIf(n == 3333) + SELECT n + throwIf(n == 3333) AS n FROM landing; INSERT INTO landing SELECT * FROM numbers(10000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } diff --git a/tests/queries/0_stateless/02346_additional_filters.sql b/tests/queries/0_stateless/02346_additional_filters.sql index f6b665713ec..5a799e1c8c1 100644 --- a/tests/queries/0_stateless/02346_additional_filters.sql +++ b/tests/queries/0_stateless/02346_additional_filters.sql @@ -4,6 +4,8 @@ drop table if exists table_2; drop table if exists v_numbers; drop table if exists mv_table; +SET max_rows_to_read = 0; + create table table_1 (x UInt32, y String) engine = MergeTree order by x; insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); diff --git a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql index b324f834053..e7c6c272102 100644 --- a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql +++ b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql @@ -2,5 +2,6 @@ SET max_execution_time = 3; SET timeout_overflow_mode = 'break'; +SET max_rows_to_read = 0, max_bytes_to_read = 0; SELECT count() FROM system.numbers_mt WHERE NOT ignore(JSONExtract('{' || repeat('"a":"b",', rand() % 10) || '"c":"d"}', 'a', 'String')) FORMAT Null; diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql 
b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index 105fb500461..f9da5b3a73c 100644 --- a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -1,5 +1,7 @@ -- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage +SET max_rows_to_read = '101M'; + DROP TABLE IF EXISTS t_2354_dist_with_external_aggr; create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; @@ -23,6 +25,6 @@ select a, b, c, sum(a) as s from remote('127.0.0.{2,3}', currentDatabase(), t_2354_dist_with_external_aggr) group by a, b, c format Null -settings max_memory_usage = '5Gi'; +settings max_memory_usage = '5Gi', max_result_rows = 0, max_result_bytes = 0; DROP TABLE t_2354_dist_with_external_aggr; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.reference b/tests/queries/0_stateless/02354_vector_search_bugs.reference index a27b086e118..8da05c8a7c0 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.reference +++ b/tests/queries/0_stateless/02354_vector_search_bugs.reference @@ -1,3 +1,4 @@ +Rejects INSERTs of Arrays with different sizes Issue #52258: Empty Arrays or Arrays with default values are rejected It is possible to create parts with different Array vector sizes but there will be an error at query time Correctness of index with > 1 mark diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index 7c66b4b8e45..51e2e6ce2b7 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -7,6 +7,12 @@ SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make DROP TABLE IF EXISTS tab; +SELECT 'Rejects INSERTs of Arrays with different sizes'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } +DROP TABLE tab; + SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference index b6d034208d0..5963f4b5834 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference @@ -3,8 +3,5 @@ Two or six index arguments 2nd argument (distance function) must be String and L2Distance or cosineDistance 3nd argument (quantization), if given, must be String and f32, f16, ... 
4nd argument (M), if given, must be UInt64 and > 1 -5nd argument (ef_construction), if given, must be UInt64 and > 0 -6nd argument (ef_search), if given, must be UInt64 and > 0 Must be created on single column Must be created on Array(Float32) columns -Rejects INSERTs of Arrays with different sizes diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index 7c2ddfe81fc..e8e6aaee1b2 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -27,12 +27,6 @@ CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar SELECT '4nd argument (M), if given, must be UInt64 and > 1'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 'invalid', 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '5nd argument (ef_construction), if given, must be UInt64 and > 0'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 'invalid', 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 0, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '6nd argument (ef_search), if given, must be UInt64 and > 0'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 'invalid')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 2, 1, 0)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT 'Must be created on single column'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } @@ -41,11 +35,6 @@ SELECT 'Must be created on Array(Float32) columns'; SET allow_suspicious_low_cardinality_types = 1; CREATE TABLE tab(id Int32, vec UInt64, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Array(UInt64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -SELECT 'Rejects INSERTs of 
Arrays with different sizes'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 7c8e4c0ca59..cb3a8c801b1 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -1,9 +1,7 @@ 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block -- ORDER-BY-type 5 [0,2] 0 6 [0,2.1] 0.09999990463256836 7 [0,2.2] 0.20000004768371582 -- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -20,11 +18,9 @@ Expression (Projection) Parts: 1/1 Granules: 1/1 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block -- ORDER-BY-type 6 [0,2] 0 7 [0,2.1] 0.09999990463256836 8 [0,2.2] 0.20000004768371582 -- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -41,11 +37,11 @@ Expression (Projection) Parts: 1/1 Granules: 2/4 Special cases -- ORDER-BY-type +-- Non-default metric, M, ef_construction, ef_search 6 [1,9.3] 0.005731362878640178 1 [2,3.2] 0.15200169244542905 7 [5.5,4.7] 0.3503476876550442 -- Special case: setting "max_limit_for_ann_queries" +-- Setting "max_limit_for_ann_queries" Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -56,3 +52,62 @@ Expression (Projection) Condition: true Parts: 1/1 Granules: 4/4 +-- Non-default quantization +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_f32) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_f16) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 +1 [2,3.2] 2.3323807824711897 +2 [4.2,3.4] 4.427188573446585 +0 [4.6,2.3] 4.609772130377966 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab_i8) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 +-- Index on Array(Float64) column +6 [0,2] 0 +7 [0,2.1] 0.10000000000000009 +8 [0,2.2] 0.20000000000000018 diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index dbf0fca32ab..fbf8427d8fe 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -14,14 +14,12 @@ CREATE 
TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -37,14 +35,12 @@ SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexe CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- ORDER-BY-type, EXPLAIN'; EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) @@ -56,19 +52,18 @@ DROP TABLE tab; SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. --- Test with non-default metric, M, ef_construction, ef_search +SELECT '-- Non-default metric, M, ef_construction, ef_search'; CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 66) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) FROM tab ORDER BY cosineDistance(vec, reference_vec) LIMIT 3; -SELECT '- Special case: setting "max_limit_for_ann_queries"'; +SELECT '-- Setting "max_limit_for_ann_queries"'; EXPLAIN indexes=1 WITH [0.0, 2.0] as reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) @@ -78,3 +73,66 @@ LIMIT 3 SETTINGS max_limit_for_ann_queries = 2; -- LIMIT 3 > 2 --> don't use the ann index DROP TABLE tab; + +SELECT '-- Non-default quantization'; +CREATE TABLE tab_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f16', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'i8', 0, 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), 
(3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab_f32; +DROP TABLE tab_f16; +DROP TABLE tab_i8; + +SELECT '-- Index on Array(Float64) column'; +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02355_column_type_name_lc.reference b/tests/queries/0_stateless/02355_column_type_name_lc.reference index 234a072299f..50c25a86b2f 100644 --- a/tests/queries/0_stateless/02355_column_type_name_lc.reference +++ b/tests/queries/0_stateless/02355_column_type_name_lc.reference @@ -1 +1 @@ -ColumnLowCardinality +LowCardinality(String) diff --git a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh index 98c9cf9b7b4..73bf3fa120a 100755 --- a/tests/queries/0_stateless/02361_fsync_profile_events.sh +++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_fsync_pe; create table data_fsync_pe (key Int) engine=MergeTree() @@ -27,7 +27,7 @@ for i in {1..100}; do $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)" read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$( - $CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q " + $CLICKHOUSE_CLIENT -m --param_query_id "$query_id" -q " system flush logs; select diff --git a/tests/queries/0_stateless/02372_now_in_block.sql b/tests/queries/0_stateless/02372_now_in_block.sql index aee4572ce8d..d0aec471801 100644 --- a/tests/queries/0_stateless/02372_now_in_block.sql +++ b/tests/queries/0_stateless/02372_now_in_block.sql @@ -1,3 +1,4 @@ +SET max_rows_to_read = 0, max_bytes_to_read = 0; SELECT count() FROM (SELECT DISTINCT nowInBlock(), nowInBlock('Pacific/Pitcairn') FROM system.numbers LIMIT 2); SELECT nowInBlock(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } SELECT nowInBlock(NULL) IS NULL; diff --git a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh index 71e3b6961f8..46396d38747 100755 --- a/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh +++ b/tests/queries/0_stateless/02377_extend_protocol_with_query_parameters.sh @@ -24,7 +24,7 @@ $CLICKHOUSE_CLIENT \ table_name="t_02377_extend_protocol_with_query_parameters_$RANDOM$RANDOM" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " create table $table_name( id Int64, arr Array(UInt8), @@ -57,17 +57,17 @@ $CLICKHOUSE_CLIENT \ # it is possible to set parameter for the current session -$CLICKHOUSE_CLIENT -n -q "set param_n = 42; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 42; select {n: UInt8}" # and it will not be visible to other sessions -$CLICKHOUSE_CLIENT -n -q "select {n: UInt8} -- { serverError 456 }" +$CLICKHOUSE_CLIENT -q "select {n: UInt8} -- { serverError 456 }" # the same parameter could be set multiple times within one session (new value overrides the previous one) -$CLICKHOUSE_CLIENT -n -q "set param_n = 12; set param_n = 13; select {n: UInt8}" +$CLICKHOUSE_CLIENT -q "set param_n = 12; set param_n = 13; select {n: UInt8}" # multiple different parameters could be defined within each session -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " set param_a = 13, param_b = 'str'; set param_c = '2022-08-04 18:30:53'; set param_d = '{\'10\': [11, 12], \'13\': [14, 15]}'; diff --git a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh index 4b9793da5bb..974f10e2f24 100755 --- a/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh +++ b/tests/queries/0_stateless/02377_optimize_sorting_by_input_stream_properties_explain.sh @@ -15,7 +15,7 @@ FIND_SORTMODE="$GREP_SORTMODE | $TRIM_LEADING_SPACES" function explain_sorting { echo "-- QUERY: "$1 - $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -nq "$1" | eval $FIND_SORTING + $CLICKHOUSE_CLIENT --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.0 -q "$1" | eval $FIND_SORTING } function explain_sortmode { diff --git a/tests/queries/0_stateless/02378_part_log_profile_events.sql 
b/tests/queries/0_stateless/02378_part_log_profile_events.sql index 38d3f8b4c05..eec76d6f50e 100644 --- a/tests/queries/0_stateless/02378_part_log_profile_events.sql +++ b/tests/queries/0_stateless/02378_part_log_profile_events.sql @@ -39,7 +39,7 @@ SYSTEM FLUSH LOGS; SELECT if(count() == 2, 'Ok', 'Error: ' || toString(count())), - if(SUM(ProfileEvents['MergedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows']))), + if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))), if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen']))) FROM system.part_log WHERE event_time > now() - INTERVAL 10 MINUTE diff --git a/tests/queries/0_stateless/02404_data.CSV b/tests/queries/0_stateless/02404_data.CSV new file mode 100644 index 00000000000..2d8b5c8daa8 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CSV @@ -0,0 +1,10 @@ +0,"1970-01-01" +1,"1970-01-02" +2,"1970-01-03" +3,"1970-01-04" +4,"1970-01-05" +5,"1970-01-06" +6,"1970-01-07" +7,"1970-01-08" +8,"1970-01-09" +9,"1970-01-10" diff --git a/tests/queries/0_stateless/02404_data.CSVWithNames b/tests/queries/0_stateless/02404_data.CSVWithNames new file mode 100644 index 00000000000..34647008916 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CSVWithNames @@ -0,0 +1,11 @@ +"number","toDate(number)" +0,"1970-01-01" +1,"1970-01-02" +2,"1970-01-03" +3,"1970-01-04" +4,"1970-01-05" +5,"1970-01-06" +6,"1970-01-07" +7,"1970-01-08" +8,"1970-01-09" +9,"1970-01-10" diff --git a/tests/queries/0_stateless/02404_data.CustomSeparated b/tests/queries/0_stateless/02404_data.CustomSeparated new file mode 100644 index 00000000000..f3ae1663536 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.CustomSeparated @@ -0,0 +1,10 @@ +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.JSONCompactEachRow b/tests/queries/0_stateless/02404_data.JSONCompactEachRow new file mode 100644 index 00000000000..de2e0986aab --- /dev/null +++ b/tests/queries/0_stateless/02404_data.JSONCompactEachRow @@ -0,0 +1,10 @@ +["0", "1970-01-01"] +["1", "1970-01-02"] +["2", "1970-01-03"] +["3", "1970-01-04"] +["4", "1970-01-05"] +["5", "1970-01-06"] +["6", "1970-01-07"] +["7", "1970-01-08"] +["8", "1970-01-09"] +["9", "1970-01-10"] diff --git a/tests/queries/0_stateless/02404_data.JSONEachRow b/tests/queries/0_stateless/02404_data.JSONEachRow new file mode 100644 index 00000000000..e77256ac7fc --- /dev/null +++ b/tests/queries/0_stateless/02404_data.JSONEachRow @@ -0,0 +1,10 @@ +{"number":"0","toDate(number)":"1970-01-01"} +{"number":"1","toDate(number)":"1970-01-02"} +{"number":"2","toDate(number)":"1970-01-03"} +{"number":"3","toDate(number)":"1970-01-04"} +{"number":"4","toDate(number)":"1970-01-05"} +{"number":"5","toDate(number)":"1970-01-06"} +{"number":"6","toDate(number)":"1970-01-07"} +{"number":"7","toDate(number)":"1970-01-08"} +{"number":"8","toDate(number)":"1970-01-09"} +{"number":"9","toDate(number)":"1970-01-10"} diff --git a/tests/queries/0_stateless/02404_data.TSKV b/tests/queries/0_stateless/02404_data.TSKV new file mode 100644 index 00000000000..70f7ad33c8b --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSKV @@ -0,0 +1,10 @@ +number=0 toDate(number)=1970-01-01 +number=1 toDate(number)=1970-01-02 +number=2 toDate(number)=1970-01-03 +number=3 toDate(number)=1970-01-04 +number=4 toDate(number)=1970-01-05 
+number=5 toDate(number)=1970-01-06 +number=6 toDate(number)=1970-01-07 +number=7 toDate(number)=1970-01-08 +number=8 toDate(number)=1970-01-09 +number=9 toDate(number)=1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.TSV b/tests/queries/0_stateless/02404_data.TSV new file mode 100644 index 00000000000..f3ae1663536 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSV @@ -0,0 +1,10 @@ +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.TSVWithNames b/tests/queries/0_stateless/02404_data.TSVWithNames new file mode 100644 index 00000000000..23310234a8c --- /dev/null +++ b/tests/queries/0_stateless/02404_data.TSVWithNames @@ -0,0 +1,11 @@ +number toDate(number) +0 1970-01-01 +1 1970-01-02 +2 1970-01-03 +3 1970-01-04 +4 1970-01-05 +5 1970-01-06 +6 1970-01-07 +7 1970-01-08 +8 1970-01-09 +9 1970-01-10 diff --git a/tests/queries/0_stateless/02404_data.Values b/tests/queries/0_stateless/02404_data.Values new file mode 100644 index 00000000000..d9a621d7ec9 --- /dev/null +++ b/tests/queries/0_stateless/02404_data.Values @@ -0,0 +1 @@ +(0,'1970-01-01'),(1,'1970-01-02'),(2,'1970-01-03'),(3,'1970-01-04'),(4,'1970-01-05'),(5,'1970-01-06'),(6,'1970-01-07'),(7,'1970-01-08'),(8,'1970-01-09'),(9,'1970-01-10') \ No newline at end of file diff --git a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference index 049603328d9..3d6b1021916 100644 --- a/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference +++ b/tests/queries/0_stateless/02404_schema_inference_cache_respect_format_settings.reference @@ -4,7 +4,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -14,7 +14,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -24,7 +24,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -34,7 +34,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -44,7 +44,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -54,7 +54,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -64,7 +64,7 @@ toDate(number) Nullable(Date) number Nullable(Float64) toDate(number) Nullable(Date) number Nullable(Int64) -toDate(number) Nullable(DateTime64(9)) +toDate(number) Nullable(DateTime) number Nullable(Int64) toDate(number) Nullable(Date) 4 @@ -74,7 +74,7 @@ c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 @@ -84,7 +84,7 @@ 
c2 Nullable(Date) c1 Nullable(Float64) c2 Nullable(Date) c1 Nullable(Int64) -c2 Nullable(DateTime64(9)) +c2 Nullable(DateTime) c1 UInt8 c2 Nullable(Date) 4 diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference index 8dd8910c858..940bc42df56 100644 --- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference +++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference @@ -416,7 +416,6 @@ logTrace lowCardinalityIndices lowCardinalityKeys lower -lowerUTF8 makeDate makeDate32 makeDateTime @@ -896,7 +895,6 @@ tupleToNameValuePairs unbin unhex upper -upperUTF8 uptime validateNestedArraySizes version diff --git a/tests/queries/0_stateless/02417_load_marks_async.sh b/tests/queries/0_stateless/02417_load_marks_async.sh index 950656e7ab6..bcede9e4f5e 100755 --- a/tests/queries/0_stateless/02417_load_marks_async.sh +++ b/tests/queries/0_stateless/02417_load_marks_async.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test;" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE test ( n0 UInt64, diff --git a/tests/queries/0_stateless/02421_new_type_json_async_insert.reference b/tests/queries/0_stateless/02421_new_type_json_async_insert.reference new file mode 100644 index 00000000000..fdd133460c6 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_async_insert.reference @@ -0,0 +1,5 @@ +INCORRECT_DATA +0 +0 +INCORRECT_DATA +aaa diff --git a/tests/queries/0_stateless/02421_new_type_json_async_insert.sh b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh new file mode 100755 index 00000000000..b23470a4179 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_async_insert.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" +$CLICKHOUSE_CLIENT --allow_experimental_json_type=1 -q "CREATE TABLE t_json_async_insert (data JSON) ENGINE = MergeTree ORDER BY tuple()" + +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"aaa"}' 2>&1 | grep -o -m1 "INCORRECT_DATA" +$CLICKHOUSE_CLIENT -q "SELECT count() FROM t_json_async_insert" +$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.parts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_json_async_insert'" + +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"aaa"}' 2>&1 | grep -o -m1 "INCORRECT_DATA" & +$CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"k1": "aaa"}' & + +wait + +$CLICKHOUSE_CLIENT -q "SELECT data.k1 FROM t_json_async_insert ORDER BY data.k1" +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" diff --git a/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference b/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference new file mode 100644 index 00000000000..172ea2d3eed --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_empty_parts.reference @@ -0,0 +1,16 @@ +Collapsing +0 +0 +DELETE all +2 +1 +('k1','String') +('k2','String') +0 +0 +TTL +1 +1 +('k2','String') +0 +0 diff --git a/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh b/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh new file mode 100755 index 00000000000..2714b9586f8 --- /dev/null +++ b/tests/queries/0_stateless/02421_new_type_json_empty_parts.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +set -euo pipefail + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh +# shellcheck source=./parts.lib +. 
"$CURDIR"/parts.lib + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'Collapsing';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data JSON) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, 1, '{\"k1\": \"aaa\"}') (1, -1, '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'DELETE all';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data JSON) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '{\"k1\": \"aaa\"}') (1, '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" +${CLICKHOUSE_CLIENT} -q "ALTER TABLE t_json_empty_parts DELETE WHERE 1 SETTINGS mutations_sync = 1;" +timeout 60 bash -c 'wait_for_delete_empty_parts t_json_empty_parts' +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT 'TTL';" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, d Date, data JSON) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS old_parts_lifetime=5;" --allow_experimental_json_type 1 +${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '2000-01-01', '{\"k1\": \"aaa\"}') (2, '2000-01-01', '{\"k2\": \"bbb\"}');" +${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" +${CLICKHOUSE_CLIENT} -q "ALTER TABLE t_json_empty_parts MODIFY TTL d;" +${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" +timeout 60 bash -c 'wait_for_delete_empty_parts t_json_empty_parts' +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" +${CLICKHOUSE_CLIENT} -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM t_json_empty_parts ORDER BY path" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" diff --git 
a/tests/queries/0_stateless/02421_type_json_async_insert.sh b/tests/queries/0_stateless/02421_type_json_async_insert.sh index 8aa0d510dbb..73d66d116ce 100755 --- a/tests/queries/0_stateless/02421_type_json_async_insert.sh +++ b/tests/queries/0_stateless/02421_type_json_async_insert.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS t_json_async_insert" -$CLICKHOUSE_CLIENT --allow_experimental_object_type=1 -q "CREATE TABLE t_json_async_insert (data JSON) ENGINE = MergeTree ORDER BY tuple()" +$CLICKHOUSE_CLIENT --allow_experimental_object_type=1 -q "CREATE TABLE t_json_async_insert (data Object('json')) ENGINE = MergeTree ORDER BY tuple()" $CLICKHOUSE_CLIENT --async_insert=1 --wait_for_async_insert=1 -q 'INSERT INTO t_json_async_insert FORMAT JSONAsObject {"aaa"}' 2>&1 | grep -o -m1 "Cannot parse object" $CLICKHOUSE_CLIENT -q "SELECT count() FROM t_json_async_insert" diff --git a/tests/queries/0_stateless/02421_type_json_empty_parts.sh b/tests/queries/0_stateless/02421_type_json_empty_parts.sh index b6cf5995bfa..2ecec524e25 100755 --- a/tests/queries/0_stateless/02421_type_json_empty_parts.sh +++ b/tests/queries/0_stateless/02421_type_json_empty_parts.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'Collapsing';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data JSON) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, s Int8, data Object('json')) ENGINE = CollapsingMergeTree(s) ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, 1, '{\"k1\": \"aaa\"}') (1, -1, '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" @@ -19,7 +19,7 @@ ${CLICKHOUSE_CLIENT} -q "DESC TABLE t_json_empty_parts SETTINGS describe_extend_ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'DELETE all';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data JSON) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, data Object('json')) ENGINE = MergeTree ORDER BY id SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '{\"k1\": \"aaa\"}') (1, '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.parts WHERE table = 't_json_empty_parts' AND database = currentDatabase() AND active;" @@ -32,7 +32,7 @@ ${CLICKHOUSE_CLIENT} -q "DESC TABLE t_json_empty_parts SETTINGS describe_extend_ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS t_json_empty_parts;" ${CLICKHOUSE_CLIENT} -q "SELECT 'TTL';" -${CLICKHOUSE_CLIENT} -q "CREATE TABLE t_json_empty_parts (id UInt64, d Date, data JSON) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 +${CLICKHOUSE_CLIENT} -q "CREATE TABLE 
t_json_empty_parts (id UInt64, d Date, data Object('json')) ENGINE = MergeTree ORDER BY id TTL d WHERE id % 2 = 1 SETTINGS old_parts_lifetime=5;" --allow_experimental_object_type 1 ${CLICKHOUSE_CLIENT} -q "INSERT INTO t_json_empty_parts VALUES (1, '2000-01-01', '{\"k1\": \"aaa\"}') (2, '2000-01-01', '{\"k2\": \"bbb\"}');" ${CLICKHOUSE_CLIENT} -q "OPTIMIZE TABLE t_json_empty_parts FINAL;" ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM t_json_empty_parts;" diff --git a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh index 0cd520d8d5d..96692ba325a 100755 --- a/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh +++ b/tests/queries/0_stateless/02450_kill_distributed_query_deadlock.sh @@ -5,20 +5,24 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -# Test that running distributed query and cancel it ASAP, -# this can trigger a hung/deadlock in ProcessorList. -for i in {1..50}; do +# Test that runs a distributed query and cancels it ASAP, +# this has a chance to trigger a hung/deadlock in ProcessorList. +for i in {1..50} +do query_id="$CLICKHOUSE_TEST_UNIQUE_NAME-$i" - $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & - while :; do + $CLICKHOUSE_CLIENT --format Null --query_id "$query_id" --max_rows_to_read 0 --max_bytes_to_read 0 --max_result_rows 0 --max_result_bytes 0 -q "select * from remote('127.{1|2|3|4|5|6}', numbers(1e12))" 2>/dev/null & + while true + do killed_queries="$($CLICKHOUSE_CLIENT -q "kill query where query_id = '$query_id' sync" | wc -l)" - if [[ "$killed_queries" -ge 1 ]]; then + if [[ "$killed_queries" -ge 1 ]] + then break fi done wait -n query_return_status=$? 
- if [[ $query_return_status -eq 0 ]]; then + if [[ $query_return_status -eq 0 ]] + then echo "Query $query_id should be cancelled, however it returns successfully" fi done diff --git a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh index 6fd6da69b70..1027f18fc83 100755 --- a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh +++ b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh @@ -12,7 +12,7 @@ echo "Parquet" DATA_FILE=$CUR_DIR/data_parquet/list_monotonically_increasing_offsets.parquet ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json Nullable(String)) ENGINE = Memory" -cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet" -${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum +cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} --max_memory_usage 10G -q "INSERT INTO parquet_load FORMAT Parquet" +${CLICKHOUSE_CLIENT} --max_result_rows 0 --max_result_bytes 0 --query="SELECT * FROM parquet_load" | md5sum ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" ${CLICKHOUSE_CLIENT} --query="drop table parquet_load" diff --git a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh index 0d0caa78ea3..e0648f4df6e 100755 --- a/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh +++ b/tests/queries/0_stateless/02482_json_nested_arrays_with_same_keys.sh @@ -21,7 +21,7 @@ echo ' } }' > 02482_object_data.jsonl -$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj JSON')" +$CLICKHOUSE_LOCAL --allow_experimental_object_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj Object(''json'')')" rm 02482_object_data.jsonl diff --git a/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference new file mode 100644 index 00000000000..3eb1f72bfd6 --- /dev/null +++ b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.reference @@ -0,0 +1 @@ +{"list":[{"nested":{"x":[{"r":"1"},{"r":"2"}]},"x":[{"r":"1"}]}]} diff --git a/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh new file mode 100755 index 00000000000..ae98946ad73 --- /dev/null +++ b/tests/queries/0_stateless/02482_new_json_nested_arrays_with_same_keys.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +echo ' +{ + "obj" : + { + "list" : + [ + { + "nested" : { + "x" : [{"r" : 1}, {"r" : 2}] + }, + "x" : [{"r" : 1}] + } + ] + } +}' > 02482_object_data.jsonl + +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select * from file(02482_object_data.jsonl, auto, 'obj JSON')" + +rm 02482_object_data.jsonl + diff --git a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh index f747b3156a5..df7e9386662 100755 --- a/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh +++ b/tests/queries/0_stateless/02490_benchmark_max_consecutive_errors.sh @@ -11,5 +11,6 @@ if [ "$RES" -eq 10 ] then echo "$RES" else + echo "$RES" cat "${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}.log" fi diff --git a/tests/queries/0_stateless/02497_remote_disk_fat_column.sql b/tests/queries/0_stateless/02497_remote_disk_fat_column.sql index d97109b66f3..65519296602 100644 --- a/tests/queries/0_stateless/02497_remote_disk_fat_column.sql +++ b/tests/queries/0_stateless/02497_remote_disk_fat_column.sql @@ -2,7 +2,7 @@ set allow_suspicious_fixed_string_types=1; create table fat_granularity (x UInt32, fat FixedString(160000)) engine = MergeTree order by x settings storage_policy = 's3_cache'; -insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 8192, max_insert_threads=8; +insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 3000, max_insert_threads = 8, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; -- Too large sizes of FixedString to deserialize select x from fat_granularity prewhere fat like '256\_%' settings max_threads=2; diff --git a/tests/queries/0_stateless/02513_validate_data_types.sql b/tests/queries/0_stateless/02513_validate_data_types.sql index 5eb91ac7879..4996f63c5bd 100644 --- a/tests/queries/0_stateless/02513_validate_data_types.sql +++ b/tests/queries/0_stateless/02513_validate_data_types.sql @@ -1,9 +1,9 @@ -- Tags: no-fasttest set allow_experimental_object_type=0; -select CAST('{"x" : 1}', 'JSON'); -- {serverError ILLEGAL_COLUMN} +select CAST('{"x" : 1}', 'Object(''json'')'); -- {serverError ILLEGAL_COLUMN} desc file(nonexist.json, JSONAsObject); -- {serverError ILLEGAL_COLUMN} -desc file(nonexist.json, JSONEachRow, 'x JSON'); -- {serverError ILLEGAL_COLUMN} +desc file(nonexist.json, JSONEachRow, 'x Object(''json'')'); -- {serverError ILLEGAL_COLUMN} set allow_suspicious_low_cardinality_types=0; select CAST(1000000, 'LowCardinality(UInt64)'); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} diff --git a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql index 80e3c0a9ece..b169cfd0ab9 100644 --- a/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql +++ b/tests/queries/0_stateless/02514_if_with_lazy_low_cardinality.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + create table if not exists t (`arr.key` Array(LowCardinality(String)), `arr.value` Array(LowCardinality(String))) engine = Memory; insert into t (`arr.key`, `arr.value`) values (['a'], ['b']); select if(true, if(lowerUTF8(arr.key) = 'a', 1, 2), 3) as x from t left array join arr; diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.reference b/tests/queries/0_stateless/02532_send_logs_level_test.reference index 72f4ea06184..e69de29bb2d 100644 
--- a/tests/queries/0_stateless/02532_send_logs_level_test.reference +++ b/tests/queries/0_stateless/02532_send_logs_level_test.reference @@ -1,4 +0,0 @@ - MergeTreeReadPoolBase: Will use min_marks_per_task=24 - MergeTreeMarksLoader: Loading marks from path data.cmrk3 - MergeTreeRangeReader: First reader returned: num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), requested columns: key - MergeTreeRangeReader: read() returned num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), sample block key diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index a50539311cb..a91e49ddd22 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -18,6 +18,10 @@ $CLICKHOUSE_CLIENT -m -q " # instead of "last" value, hence you cannot simply append another # --send_logs_level here. CLICKHOUSE_CLIENT_CLEAN=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=test/g') -$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -o -e '.*' -e '.*' -$CLICKHOUSE_CLIENT -q "drop table data" +set -e + +trap '$CLICKHOUSE_CLIENT -q "drop table data"' EXIT + +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& (! grep -q -o -e '.*') +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -q -o -e '.*' diff --git a/tests/queries/0_stateless/02534_keyed_siphash.reference b/tests/queries/0_stateless/02534_keyed_siphash.reference index 3f478218ff1..31c0cae8981 100644 --- a/tests/queries/0_stateless/02534_keyed_siphash.reference +++ b/tests/queries/0_stateless/02534_keyed_siphash.reference @@ -236,6 +236,13 @@ Check asan bug 0 Check bug found fuzzing 9042C6691B1A75F0EA3314B6F55728BB -Check bug 2 found fuzzing +Test arrays and maps 608E1FF030C9E206185B112C2A25F1A7 ABB65AE97711A2E053E324ED88B1D08B +Test empty arrays and maps +4761183170873013810 +0AD04BFD000000000000000000000000 +4761183170873013810 +0AD04BFD000000000000000000000000 +16734549324845627102 +D675BB3D687973A238AB891DD99C7047 diff --git a/tests/queries/0_stateless/02534_keyed_siphash.sql b/tests/queries/0_stateless/02534_keyed_siphash.sql index fb707109c83..b499d8ef02b 100644 --- a/tests/queries/0_stateless/02534_keyed_siphash.sql +++ b/tests/queries/0_stateless/02534_keyed_siphash.sql @@ -263,10 +263,10 @@ select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)); select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)); -select sipHash64Keyed((0, 0), '1'); -- { serverError NOT_IMPLEMENTED } -select sipHash128Keyed((0, 0), '1'); -- { serverError NOT_IMPLEMENTED } -select sipHash64Keyed(toUInt64(0), '1'); -- { serverError NOT_IMPLEMENTED } -select sipHash128Keyed(toUInt64(0), '1'); -- { serverError NOT_IMPLEMENTED } +select sipHash64Keyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128Keyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash64Keyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128Keyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } select hex(sipHash64()); SELECT hex(sipHash128()); @@ -339,9 +339,17 @@ SELECT 'Check bug found fuzzing'; SELECT [(255, 1048575)], sipHash128ReferenceKeyed((toUInt64(2147483646), toUInt64(9223372036854775807)), ([(NULL, 100), (NULL, NULL), (1024, 10)], toUInt64(2), toUInt64(1024)), ''), hex(sipHash128ReferenceKeyed((-9223372036854775807, 1.), '-1', NULL)), ('', toUInt64(65535), [(9223372036854775807, 9223372036854775806)], toUInt64(65536)), arrayJoin((NULL, 65537, 255), [(NULL, NULL)]) GROUP BY tupleElement((NULL, NULL, NULL, -1), toUInt64(2), 2) = NULL; -- { serverError NOT_IMPLEMENTED } SELECT hex(sipHash128ReferenceKeyed((0::UInt64, 0::UInt64), ([1, 1]))); -SELECT 'Check bug 2 found fuzzing'; +SELECT 'Test arrays and maps'; DROP TABLE IF EXISTS sipHashKeyed_keys; CREATE TABLE sipHashKeyed_keys (`a` Map(String, String)) ENGINE = Memory; INSERT INTO sipHashKeyed_keys FORMAT VALUES ({'a':'b', 'c':'d'}), ({'e':'f', 'g':'h'}); SELECT hex(sipHash128ReferenceKeyed((0::UInt64, materialize(0::UInt64)), a)) FROM sipHashKeyed_keys ORDER BY a; DROP TABLE sipHashKeyed_keys; + +SELECT 'Test empty arrays and maps'; +SELECT sipHash64Keyed((1::UInt64, 2::UInt64), []); +SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), [])); +SELECT sipHash64Keyed((1::UInt64, 2::UInt64), mapFromArrays([], [])); +SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), mapFromArrays([], []))); +SELECT sipHash64Keyed((1::UInt64, 2::UInt64), map([0], 1, [2], 3)); +SELECT hex(sipHash128Keyed((0::UInt64, 0::UInt64), map([0], 1, [2], 3))); diff --git a/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql b/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql index a4e0965e329..3accc726d08 100644 --- a/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql +++ b/tests/queries/0_stateless/02536_delta_gorilla_corruption.sql @@ -12,7 +12,7 @@ create table bug_delta_gorilla (value_bug UInt64 codec (Delta, Gorilla)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' -as (select 0 from numbers(30000000)); +as (select 0 from numbers(20000000)); select count(*) from bug_delta_gorilla diff --git a/tests/queries/0_stateless/02552_siphash128_reference.sql b/tests/queries/0_stateless/02552_siphash128_reference.sql index f7324ed0ee4..46f292d667d 100644 --- a/tests/queries/0_stateless/02552_siphash128_reference.sql +++ b/tests/queries/0_stateless/02552_siphash128_reference.sql @@ -200,8 +200,8 @@ select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0,
1, 2, 3, 4, 5, select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)); select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)); -select sipHash128ReferenceKeyed((0, 0), '1'); -- { serverError NOT_IMPLEMENTED } -select sipHash128ReferenceKeyed(toUInt64(0), '1'); -- { serverError NOT_IMPLEMENTED } +select sipHash128ReferenceKeyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128ReferenceKeyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } SELECT hex(sipHash128Reference()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128()) = '1CE422FEE7BD8DE20000000000000000'; SELECT hex(sipHash128ReferenceKeyed()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128Keyed()) = '1CE422FEE7BD8DE20000000000000000'; diff --git a/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference b/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference new file mode 100644 index 00000000000..1556b015503 --- /dev/null +++ b/tests/queries/0_stateless/02553_new_type_json_attach_partition.reference @@ -0,0 +1,2 @@ +{"b":"1","c":{"k1":"1"}} +{"b":"1","c":{"k1":["1","2"]}} diff --git a/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql new file mode 100644 index 00000000000..c7d4c0b5d55 --- /dev/null +++ b/tests/queries/0_stateless/02553_new_type_json_attach_partition.sql @@ -0,0 +1,15 @@ +SET allow_experimental_json_type = 1; + +DROP TABLE IF EXISTS t_json_attach_partition; + +CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; + +ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; + +ALTER TABLE t_json_attach_partition ATTACH PARTITION tuple(); +SELECT * FROM t_json_attach_partition ORDER BY toString(c) FORMAT JSONEachRow; + +DROP TABLE t_json_attach_partition; diff --git a/tests/queries/0_stateless/02553_type_json_attach_partition.sql b/tests/queries/0_stateless/02553_type_json_attach_partition.sql index e77f5885ec3..428189f3a84 100644 --- a/tests/queries/0_stateless/02553_type_json_attach_partition.sql +++ b/tests/queries/0_stateless/02553_type_json_attach_partition.sql @@ -2,7 +2,7 @@ SET allow_experimental_object_type = 1; DROP TABLE IF EXISTS t_json_attach_partition; -CREATE TABLE 
t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t_json_attach_partition(b UInt64, c Object('json')) ENGINE = MergeTree ORDER BY tuple(); INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": 1}}; diff --git a/tests/queries/0_stateless/02553_type_object_analyzer.sql b/tests/queries/0_stateless/02553_type_object_analyzer.sql index eb4e49757cf..e5dd6eaebc0 100644 --- a/tests/queries/0_stateless/02553_type_object_analyzer.sql +++ b/tests/queries/0_stateless/02553_type_object_analyzer.sql @@ -3,7 +3,7 @@ SET allow_experimental_object_type = 1; SET enable_analyzer = 1; DROP TABLE IF EXISTS t_json_analyzer; -CREATE TABLE t_json_analyzer (a JSON) ENGINE = Memory; +CREATE TABLE t_json_analyzer (a Object('json')) ENGINE = Memory; INSERT INTO t_json_analyzer VALUES ('{"id": 2, "obj": {"k2": {"k3": "str", "k4": [{"k6": 55}]}, "some": 42}, "s": "bar"}'); SELECT any(a) AS data FROM t_json_analyzer FORMAT JSONEachRow; diff --git a/tests/queries/0_stateless/02585_query_status_deadlock.sh b/tests/queries/0_stateless/02585_query_status_deadlock.sh index e3e34109cdb..932cf593393 100755 --- a/tests/queries/0_stateless/02585_query_status_deadlock.sh +++ b/tests/queries/0_stateless/02585_query_status_deadlock.sh @@ -7,8 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) QUERY_ID="${CLICKHOUSE_DATABASE}_test_02585_query_to_kill_id_1" -$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" -n -q " -create temporary table tmp as select * from numbers(500000000); +$CLICKHOUSE_CLIENT --query_id="$QUERY_ID" --max_rows_to_read 0 -n -q " +create temporary table tmp as select * from numbers(100000000); select * from remote('127.0.0.2', 'system.numbers_mt') where number in (select * from tmp);" &> /dev/null & $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" @@ -19,8 +19,7 @@ do if [ -n "$res" ]; then break fi - sleep 1 + sleep 1 done $CLICKHOUSE_CLIENT -q "kill query where query_id = '$QUERY_ID' sync" &> /dev/null - diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh index 6d770b308b5..ff534a6a2e6 100755 --- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh +++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest +# Tags: no-fasttest, no-random-merge-tree-settings # Tag no-fasttest: needs s3 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) @@ -17,7 +17,9 @@ WITH '(\\w+): (\\d+)' AS pattern, WHERE line LIKE '% S3%' AND line NOT LIKE '%Microseconds%' AND line NOT LIKE '%S3DiskConnections%' - AND line NOT LIKE '%S3DiskAddresses') AS pe_map + AND line NOT LIKE '%S3DiskAddresses%' + AND line NOT LIKE '%RequestThrottlerCount%' + ) AS pe_map SELECT * FROM ( SELECT untuple(arrayJoin(pe_map) AS pe) WHERE tupleElement(pe, 1) not like '%WriteRequests%' diff --git a/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh b/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh index 2be13588453..5a2cec08eca 100755 --- a/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh +++ b/tests/queries/0_stateless/02697_stop_reading_on_first_cancel.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) QUERY_ID="${CLICKHOUSE_DATABASE}_read_with_cancel" -$CLICKHOUSE_CLIENT -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" & 
+$CLICKHOUSE_CLIENT --max_rows_to_read 0 -n --query_id="$QUERY_ID" --query="SELECT sum(number * 0) FROM numbers(10000000000) SETTINGS partial_result_on_first_cancel=true;" & pid=$! for _ in {0..60} diff --git a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh index a34a480a078..cfb38c60615 100755 --- a/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh +++ b/tests/queries/0_stateless/02700_s3_part_INT_MAX.sh @@ -10,7 +10,9 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # NOTE: .sh test is used over .sql because it needs $CLICKHOUSE_DATABASE to # avoid truncation, since seems that the version of MinIO that is used on CI # too slow with this. -$CLICKHOUSE_CLIENT -nm -q " +# +# Unfortunately, the test has to buffer it in memory. +$CLICKHOUSE_CLIENT --max_memory_usage 16G -nm -q " INSERT INTO FUNCTION s3('http://localhost:11111/test/$CLICKHOUSE_DATABASE/test_INT_MAX.tsv', '', '', 'TSV') SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024) SETTINGS s3_max_single_part_upload_size = '5Gi'; diff --git a/tests/queries/0_stateless/02717_pretty_json.sql b/tests/queries/0_stateless/02717_pretty_json.sql index 8a49eb50adf..1a5c090bcb2 100644 --- a/tests/queries/0_stateless/02717_pretty_json.sql +++ b/tests/queries/0_stateless/02717_pretty_json.sql @@ -1,3 +1,3 @@ set allow_experimental_object_type=1; -select 42 as num, [42, 42] as arr, [[[42, 42], [42, 42]], [[42, 42]]] as nested_arr, tuple(42, 42)::Tuple(a UInt32, b UInt32) as tuple, tuple(tuple(tuple(42, 42), 42), 42)::Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32) as nested_tuple, map(42, 42, 24, 24) as map, map(42, map(42, map(42, 42))) as nested_map, [tuple(map(42, 42), [42, 42]), tuple(map(42, 42), [42, 42])]::Array(Tuple(Map(UInt32, UInt32), Array(UInt32))) as nested_types, '{"a" : {"b" : 1, "c" : 2}}'::JSON as json_object format PrettyNDJSON; +select 42 as num, [42, 42] as arr, [[[42, 42], [42, 42]], [[42, 42]]] as nested_arr, tuple(42, 42)::Tuple(a UInt32, b UInt32) as tuple, tuple(tuple(tuple(42, 42), 42), 42)::Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32) as nested_tuple, map(42, 42, 24, 24) as map, map(42, map(42, map(42, 42))) as nested_map, [tuple(map(42, 42), [42, 42]), tuple(map(42, 42), [42, 42])]::Array(Tuple(Map(UInt32, UInt32), Array(UInt32))) as nested_types, '{"a" : {"b" : 1, "c" : 2}}'::Object('json') as json_object format PrettyNDJSON; diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference index de0f151db7d..cb905d63ca5 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.reference @@ -44,7 +44,7 @@ nested.col1 Array(String) NO \N nested.col2 Array(UInt32) NO \N nfs Nullable(FixedString(3)) YES \N ns Nullable(String) YES \N -o Object(\'json\') NO \N +o JSON NO \N p Point NO \N pg Polygon NO \N r Ring NO \N diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql index 3bbcbb1a535..dadfa59bf87 100644 --- a/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql +++ b/tests/queries/0_stateless/02775_show_columns_called_from_clickhouse.sql @@ -11,7 +11,7 @@ DROP TABLE IF EXISTS tab; SET allow_suspicious_low_cardinality_types=1; -SET allow_experimental_object_type=1; +SET 
allow_experimental_json_type=1; CREATE TABLE tab ( diff --git a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect index 4798a6958c6..2079da9d34a 100755 --- a/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect +++ b/tests/queries/0_stateless/02775_show_columns_called_from_mysql.expect @@ -33,7 +33,7 @@ send -- "DROP TABLE IF EXISTS tab;\r" expect "Query OK, 0 rows affected" send -- "SET allow_suspicious_low_cardinality_types=1;\r" -send -- "SET allow_experimental_object_type=1;\r" +send -- "SET allow_experimental_json_type=1;\r" send -- " CREATE TABLE tab diff --git a/tests/queries/0_stateless/02786_max_execution_time_leaf.sql b/tests/queries/0_stateless/02786_max_execution_time_leaf.sql index f678c913b46..2e4623f4ac6 100644 --- a/tests/queries/0_stateless/02786_max_execution_time_leaf.sql +++ b/tests/queries/0_stateless/02786_max_execution_time_leaf.sql @@ -1,4 +1,5 @@ -- Tags: no-fasttest +SET max_rows_to_read = 0; SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) SETTINGS max_execution_time_leaf = 1; -- { serverError TIMEOUT_EXCEEDED } -- Can return partial result SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(100000000000) )) FORMAT Null SETTINGS max_execution_time_leaf = 1, timeout_overflow_mode_leaf = 'break'; diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference index 7764974255b..877bb5f390f 100644 --- a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference @@ -1 +1,2 @@ 424242424242424242424242424242424242424242424242424242 +22707864971053448441042714569797161695738549521977760418632926980540162388532 diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh index 8865b2e7aab..0f590027f19 100755 --- a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh @@ -5,5 +5,8 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh +# This is parsed as text. $CLICKHOUSE_LOCAL -q "select toString(424242424242424242424242424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" +# But this is parsed as binary because text length happens to be 32 bytes. Not ideal. 
+$CLICKHOUSE_LOCAL -q "select toString(42424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" diff --git a/tests/queries/0_stateless/02792_drop_projection_lwd.sql b/tests/queries/0_stateless/02792_drop_projection_lwd.sql index dcde7dcc600..dad7f7cd028 100644 --- a/tests/queries/0_stateless/02792_drop_projection_lwd.sql +++ b/tests/queries/0_stateless/02792_drop_projection_lwd.sql @@ -7,7 +7,7 @@ CREATE TABLE t_projections_lwd (a UInt32, b UInt32, PROJECTION p (SELECT * ORDER INSERT INTO t_projections_lwd SELECT number, number FROM numbers(100); -- LWD does not work, as expected -DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError NOT_IMPLEMENTED } +DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError SUPPORT_IS_DISABLED } KILL MUTATION WHERE database = currentDatabase() AND table = 't_projections_lwd' SYNC FORMAT Null; -- drop projection diff --git a/tests/queries/0_stateless/02807_lower_utf8_msan.sql b/tests/queries/0_stateless/02807_lower_utf8_msan.sql index e9eb18bf615..95f224577f7 100644 --- a/tests/queries/0_stateless/02807_lower_utf8_msan.sql +++ b/tests/queries/0_stateless/02807_lower_utf8_msan.sql @@ -1,2 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1; SELECT upperUTF8(materialize('aaaaАБВГaaaaaaaaaaaaАБВГAAAAaaAA')) FROM numbers(2); diff --git a/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh b/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh index 9234c428147..e2afc1d208c 100755 --- a/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh +++ b/tests/queries/0_stateless/02818_memory_profiler_sample_min_max_allocation_size.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh query_id="${CLICKHOUSE_DATABASE}_min_max_allocation_size_$RANDOM$RANDOM" -${CLICKHOUSE_CLIENT} --query_id="$query_id" --memory_profiler_sample_min_allocation_size=4096 --memory_profiler_sample_max_allocation_size=8192 --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 --query "select randomPrintableASCII(number) from numbers(1000) FORMAT Null" +${CLICKHOUSE_CLIENT} --query_id="$query_id" --memory_profiler_sample_min_allocation_size=4096 --memory_profiler_sample_max_allocation_size=16384 --log_queries=1 --max_threads=1 --max_untracked_memory=0 --memory_profiler_sample_probability=1 --query "select randomPrintableASCII(number) from numbers(1000) FORMAT Null" ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" @@ -14,4 +14,4 @@ ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" ${CLICKHOUSE_CLIENT} --query "SELECT countDistinct(abs(size)) > 0 FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample'" # show wrong allocations -${CLICKHOUSE_CLIENT} --query "SELECT abs(size) FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample' and (abs(size) > 8192 or abs(size) < 4096)" +${CLICKHOUSE_CLIENT} --query "SELECT abs(size) FROM system.trace_log where query_id='$query_id' and trace_type = 'MemorySample' and (abs(size) > 16384 or abs(size) < 4096)" diff --git a/tests/queries/0_stateless/02835_drop_user_during_session.sh b/tests/queries/0_stateless/02835_drop_user_during_session.sh index c32003a2a11..01e4f9a5c2b 100755 --- a/tests/queries/0_stateless/02835_drop_user_during_session.sh +++ b/tests/queries/0_stateless/02835_drop_user_during_session.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-debug +# Tags: no-debug, no-random-settings, no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference b/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference index 4adf418bcc7..8003b9cb626 100644 --- a/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown.reference @@ -71,3 +71,5 @@ d256 Nullable(Decimal(76, 40)) 500 244750 500 244750 500 244750 +42 +100 diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql b/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql index 950485d53f0..52caee50b32 100644 --- a/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown.sql @@ -131,3 +131,9 @@ select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, select count(), sum(number) from file('02841.parquet') where indexHint(string_or_null == ''); -- quirk with infinities select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, string_or_null String') where indexHint(string_or_null == ''); select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, nEgAtIvE_oR_nUlL Int64') where indexHint(nEgAtIvE_oR_nUlL > -50) settings input_format_parquet_case_insensitive_column_matching = 1; + +-- Bad type conversions. 
+insert into function file('02841.parquet') select 42 as x; +select * from file('02841.parquet', Parquet, 'x Nullable(String)') where x not in (1); +insert into function file('t.parquet', Parquet, 'x String') values ('1'), ('100'), ('2'); +select * from file('t.parquet', Parquet, 'x Int64') where x >= 3; diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.reference b/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.reference new file mode 100644 index 00000000000..6ed63af507a --- /dev/null +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.reference @@ -0,0 +1 @@ +[1,2] diff --git a/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.sh b/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.sh new file mode 100755 index 00000000000..58eb207b6e6 --- /dev/null +++ b/tests/queries/0_stateless/02841_parquet_filter_pushdown_bug.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select f from file('$CURDIR/data_parquet/68131.parquet', Parquet, 'f Array(Int32)')" \ No newline at end of file diff --git a/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql b/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql index 511ed0c59de..00b527a9378 100644 --- a/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql +++ b/tests/queries/0_stateless/02844_subquery_timeout_with_break.sql @@ -4,7 +4,7 @@ CREATE TABLE t (key UInt64, value UInt64, INDEX value_idx value TYPE bloom_filte INSERT INTO t SELECT number, rand()%1000 FROM numbers(10000); SET timeout_overflow_mode='break'; -SET max_execution_time=0.1; +SET max_execution_time=0.1, max_rows_to_read=0; SELECT * FROM t WHERE value IN (SELECT number FROM numbers(1000000000)); DROP TABLE t; diff --git a/tests/queries/0_stateless/02870_per_column_settings.sql b/tests/queries/0_stateless/02870_per_column_settings.sql index d242ebe6c61..c3050222bc8 100644 --- a/tests/queries/0_stateless/02870_per_column_settings.sql +++ b/tests/queries/0_stateless/02870_per_column_settings.sql @@ -49,7 +49,7 @@ CREATE TABLE tab ( id UInt64, tup Tuple(UInt64, UInt64) SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), - json JSON SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), + json Object('json') SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840), ) ENGINE = MergeTree ORDER BY id diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.reference b/tests/queries/0_stateless/02884_parallel_window_functions.reference index bac15838dc2..1f5346a1484 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.reference +++ b/tests/queries/0_stateless/02884_parallel_window_functions.reference @@ -12,7 +12,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -32,7 +32,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -42,6 +42,7 @@ SETTINGS max_threads = 1; 0 2 0 1 2 0 2 2 0 +SET max_rows_to_read = 40000000; SELECT nw, sum(WR) AS R, @@ -53,7 +54,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 0 GROUP BY ac, @@ -64,7 +65,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM 
window_function_threading WHERE (ac % 4) = 1 GROUP BY ac, @@ -75,7 +76,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 2 GROUP BY ac, @@ -86,7 +87,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 3 GROUP BY ac, diff --git a/tests/queries/0_stateless/02884_parallel_window_functions.sql b/tests/queries/0_stateless/02884_parallel_window_functions.sql index c5ab013a198..2207c90a4ee 100644 --- a/tests/queries/0_stateless/02884_parallel_window_functions.sql +++ b/tests/queries/0_stateless/02884_parallel_window_functions.sql @@ -1,6 +1,6 @@ -- Tags: long, no-tsan, no-asan, no-ubsan, no-msan, no-debug -CREATE TABLE window_funtion_threading +CREATE TABLE window_function_threading Engine = MergeTree ORDER BY (ac, nw) AS SELECT @@ -20,7 +20,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -40,7 +40,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -58,7 +58,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading GROUP BY ac, nw ) GROUP BY nw @@ -66,6 +66,8 @@ ORDER BY nw ASC, R DESC LIMIT 10 SETTINGS max_threads = 1; +SET max_rows_to_read = 40000000; + SELECT nw, sum(WR) AS R, @@ -77,7 +79,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 0 GROUP BY ac, @@ -88,7 +90,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 1 GROUP BY ac, @@ -99,7 +101,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 2 GROUP BY ac, @@ -110,7 +112,7 @@ FROM AVG(wg) AS WR, ac, nw - FROM window_funtion_threading + FROM window_function_threading WHERE (ac % 4) = 3 GROUP BY ac, diff --git a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql index ec86a66c7dd..ecaad62b35a 100644 --- a/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql +++ b/tests/queries/0_stateless/02896_max_execution_time_with_break_overflow_mode.sql @@ -1,5 +1,7 @@ -- Tags: no-fasttest +SET max_rows_to_read = 0, max_execution_time = 0, max_estimated_execution_time = 0; + -- Query stops after timeout without an error SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null; diff --git a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference index aa8f22f590a..0db19f0591a 100644 --- a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference +++ b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.reference @@ -1,8 +1,8 @@ BACKUP_CREATED -CREATE TABLE default.test\n(\n `test` Array(Tuple(foo String, bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test\n(\n `test` Array(Tuple(\n foo String,\n bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 BACKUP_CREATED CREATE TABLE default.test2\n(\n `test` Nested(foo String, bar Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 RESTORED -CREATE TABLE 
default.test\n(\n `test` Array(Tuple(foo String, bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test\n(\n `test` Array(Tuple(\n foo String,\n bar Float64))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 RESTORED CREATE TABLE default.test2\n(\n `test` Nested(foo String, bar Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference index d4191af1594..41a60204eab 100644 --- a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.reference @@ -1,2 +1,2 @@ -1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection_sql 0 5000 0 16 -1048576 10000000 33554432 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection 0 5000 0 16 +1048576 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection_sql 0 5000 0 16 +1048576 10000000 33554432 1 4194304 0 0 0 0 /var/lib/clickhouse/filesystem_caches/collection 0 5000 0 16 diff --git a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql index c7216833bc9..127baa8304e 100644 --- a/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql +++ b/tests/queries/0_stateless/02908_filesystem_cache_as_collection.sql @@ -3,8 +3,8 @@ CREATE NAMED COLLECTION IF NOT EXISTS cache_collection_sql AS path = 'collection_sql', max_size = '1Mi'; DROP TABLE IF EXISTS test; CREATE TABLE test (a Int32, b String) -ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME', cache_name='cache_collection_sql'); +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME', cache_name='cache_collection_sql', load_metadata_asynchronously = 0); DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME'; CREATE TABLE test2 (a Int32, b String) -ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME_2', cache_name='cache_collection'); +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = '$CLICHOUSE_TEST_UNIQUE_NAME_2', cache_name='cache_collection', load_metadata_asynchronously = 0); DESCRIBE FILESYSTEM CACHE '$CLICHOUSE_TEST_UNIQUE_NAME_2'; diff --git a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql index bda5e958453..97672bf89c6 100644 --- a/tests/queries/0_stateless/02910_object-json-crash-add-column.sql +++ b/tests/queries/0_stateless/02910_object-json-crash-add-column.sql @@ -11,10 +11,10 @@ ORDER BY i; INSERT INTO test02910 (i, jString) SELECT 1, '{"a":"123"}'; -ALTER TABLE test02910 ADD COLUMN j2 Tuple(JSON) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 Tuple(Float64, JSON); -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 Tuple(Array(Tuple(JSON))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910 ADD COLUMN j2 JSON default jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Object('json')) DEFAULT 
jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Float64, Object('json')); -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Tuple(Array(Tuple(Object('json')))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910 ADD COLUMN j2 Object('json') default jString; -- { serverError SUPPORT_IS_DISABLED } -- If we would allow adding a column with dynamic subcolumns the subsequent select would crash the server. -- SELECT * FROM test02910; @@ -39,10 +39,10 @@ INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', ['c', 'd'] FROM numbers(10); INSERT INTO test02910_second SELECT number, number, '2023-10-28 11:11:11.11111', [] FROM numbers(10); -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(JSON) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Float64, JSON); -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Array(Tuple(JSON))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } -ALTER TABLE test02910_second ADD COLUMN `tags_json` JSON; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Object('json')) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Float64, Object('json')); -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Tuple(Array(Tuple(Object('json')))) DEFAULT jString; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test02910_second ADD COLUMN `tags_json` Object('json'); -- { serverError SUPPORT_IS_DISABLED } -- If we would allow adding a column with dynamic subcolumns the subsequent select would crash the server. 
-- SELECT * FROM test02910; diff --git a/tests/queries/0_stateless/02915_sleep_large_uint.sql b/tests/queries/0_stateless/02915_sleep_large_uint.sql index f7c04ab6d1f..08b6c580a28 100644 --- a/tests/queries/0_stateless/02915_sleep_large_uint.sql +++ b/tests/queries/0_stateless/02915_sleep_large_uint.sql @@ -1,6 +1,7 @@ SELECT sleep(3.40282e+44); -- { serverError BAD_ARGUMENTS } SELECT sleep((pow(2, 64) / 1000000) - 1); -- { serverError BAD_ARGUMENTS } SELECT sleepEachRow(184467440737095516) from numbers(10000); -- { serverError BAD_ARGUMENTS } +SET max_rows_to_read = 0; SELECT sleepEachRow(pow(2, 31)) from numbers(9007199254740992) settings function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } -- Another corner case, but it requires lots of memory to run (huge block size) diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.sql b/tests/queries/0_stateless/02916_glogal_in_cancel.sql index ad54f1ecdec..dd61795947a 100644 --- a/tests/queries/0_stateless/02916_glogal_in_cancel.sql +++ b/tests/queries/0_stateless/02916_glogal_in_cancel.sql @@ -1,2 +1,2 @@ -set max_execution_time = 0.5, timeout_overflow_mode = 'break'; +set max_execution_time = 0.5, timeout_overflow_mode = 'break', max_rows_to_read = 0; SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null; diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference new file mode 100644 index 00000000000..bfc6add90a7 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.reference @@ -0,0 +1,32 @@ +<1: created view> a [] 1 +CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 2 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x +<2: refreshed> 3 1 1 +<3: time difference at least> 1000 +<4: next refresh in> 2 +<4.1: fake clock> Scheduled 2050-01-01 00:00:01 2050-01-01 00:00:03 +<4.5: altered> Scheduled Finished 2052-01-01 00:00:00 +CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src +<5: no refresh> 3 +<6: refreshed> 2 +<7: refreshed> Scheduled Finished 2054-01-01 00:00:00 +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a +<7.5: created dependent> 2052-11-11 11:11:11 +<8: refreshed> 20 +<9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 +<9: refreshed> b Scheduled Finished 2054-01-01 00:00:00 +<9.2: dropping> 0 2 +<9.4: dropped> 0 2 +<10: creating> a Scheduled [] 2054-01-01 00:00:00 +<10: creating> b WaitingForDependencies ['default.a'] 2054-01-01 00:00:00 +<11: chain-refreshed a> 4 +<12: chain-refreshed b> 40 +<13: chain-refreshed> a Scheduled [] Finished 2054-01-01 00:00:01 2056-01-01 00:00:00 1 +<13: chain-refreshed> b Scheduled ['default.a'] Finished 2054-01-24 23:22:21 2056-01-01 00:00:00 1 +<14: waiting for next cycle> a Scheduled [] 2058-01-01 00:00:00 +<14: waiting for next cycle> b WaitingForDependencies ['default.a'] 2060-01-01 00:00:00 +<15: chain-refreshed a> 6 +<16: chain-refreshed b> 60 +<17: chain-refreshed> a Scheduled 2062-01-01 00:00:00 +<17: chain-refreshed> b Scheduled 2062-01-01 00:00:00 +<18: removed dependency> b Scheduled [] 2062-03-03 03:03:03 2064-01-01 00:00:00 5 
+CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh new file mode 100755 index 00000000000..2b92a113e91 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_1.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +# Tags: atomic-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh +# scheduling is done in UTC. +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --session_timezone Etc/UTC"`" + +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" + + +# Basic refreshing. +$CLICKHOUSE_CLIENT -nq " + create materialized view a + refresh after 2 second + engine Memory + empty + as select number as x from numbers(2) union all select rand64() as x; + select '<1: created view>', view, remaining_dependencies, exception, last_refresh_result in ('Unknown', 'Finished') from refreshes; + show create a;" +# Wait for any refresh. (xargs trims the string and turns \t and \n into spaces) +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" == 'Unknown' ] +do + sleep 0.5 +done +start_time="`$CLICKHOUSE_CLIENT -nq "select reinterpret(now64(), 'Int64')"`" +# Check table contents. +$CLICKHOUSE_CLIENT -nq "select '<2: refreshed>', count(), sum(x=0), sum(x=1) from a" +# Wait for table contents to change. +res1="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values'`" +while : +do + res2="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res2" == "$res1" ] || break + sleep 0.5 +done +# Wait for another change. +while : +do + res3="`$CLICKHOUSE_CLIENT -nq 'select * from a order by x format Values -- $LINENO'`" + [ "$res3" == "$res2" ] || break + sleep 0.5 +done +# Check that the two changes were at least 1 second apart, in particular that we're not refreshing +# like crazy. This is potentially flaky, but we need at least one test that uses non-mocked timer +# to make sure the clock+timer code works at all. If it turns out flaky, increase refresh period above. +$CLICKHOUSE_CLIENT -nq " + select '<3: time difference at least>', min2(reinterpret(now64(), 'Int64') - $start_time, 1000); + select '<4: next refresh in>', next_refresh_time-last_refresh_time from refreshes;" + +# Create a source table from which views will read. +$CLICKHOUSE_CLIENT -nq " + create table src (x Int8) engine Memory as select 1;" + +# Switch to fake clock, change refresh schedule, change query. 
+$CLICKHOUSE_CLIENT -nq " + system test view a set fake time '2050-01-01 00:00:01'; + system wait view a; + system refresh view a; + system wait view a; + select '<4.1: fake clock>', status, last_refresh_time, next_refresh_time from refreshes; + alter table a modify refresh every 2 year; + alter table a modify query select x*2 as x from src; + select '<4.5: altered>', status, last_refresh_result, next_refresh_time from refreshes; + show create a;" +# Advance time to trigger the refresh. +$CLICKHOUSE_CLIENT -nq " + select '<5: no refresh>', count() from a; + system test view a set fake time '2052-02-03 04:05:06';" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_time from refreshes -- $LINENO" | xargs`" != '2052-02-03 04:05:06' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<6: refreshed>', * from a; + select '<7: refreshed>', status, last_refresh_result, next_refresh_time from refreshes;" + +# Create a dependent view, refresh it once. +$CLICKHOUSE_CLIENT -nq " + create materialized view b refresh every 2 year depends on a (y Int32) engine MergeTree order by y empty as select x*10 as y from a; + show create b; + system test view b set fake time '2052-11-11 11:11:11'; + system refresh view b; + system wait view b; + select '<7.5: created dependent>', last_refresh_time from refreshes where view = 'b';" +# Next refresh shouldn't start until the dependency refreshes. +$CLICKHOUSE_CLIENT -nq " + select '<8: refreshed>', * from b; + select '<9: refreshed>', view, status, last_refresh_result, next_refresh_time from refreshes; + system test view b set fake time '2054-01-24 23:22:21';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies 2054-01-01 00:00:00' ] +do + sleep 0.5 +done + +# Drop the source table, check that refresh fails and doesn't leave a temp table behind. +$CLICKHOUSE_CLIENT -nq " + select '<9.2: dropping>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase(); + drop table src; + system refresh view a;" +$CLICKHOUSE_CLIENT -nq "system wait view a;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " + select '<9.4: dropped>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase();" + +# Create the source table again, check that refresh succeeds (in particular that tables are looked +# up by name rather than uuid). +$CLICKHOUSE_CLIENT -nq " + select '<10: creating>', view, status, remaining_dependencies, next_refresh_time from refreshes; + create table src (x Int16) engine Memory as select 2; + system test view a set fake time '2054-01-01 00:00:01';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled' ] +do + sleep 0.5 +done +# Both tables should've refreshed. +$CLICKHOUSE_CLIENT -nq " + select '<11: chain-refreshed a>', * from a; + select '<12: chain-refreshed b>', * from b; + select '<13: chain-refreshed>', view, status, remaining_dependencies, last_refresh_result, last_refresh_time, next_refresh_time, exception == '' from refreshes;" + +# Make the dependent table run ahead by one refresh cycle, make sure it waits for the dependency to +# catch up to the same cycle. 
+$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2059-01-01 00:00:00'; + system refresh view b;" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2060-01-01 00:00:00' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2061-01-01 00:00:00'; + system test view a set fake time '2057-01-01 00:00:00';" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, next_refresh_time from refreshes -- $LINENO" | xargs`" != 'Scheduled 2058-01-01 00:00:00 WaitingForDependencies 2060-01-01 00:00:00' ] +do + sleep 0.5 +done + +$CLICKHOUSE_CLIENT -nq " + select '<14: waiting for next cycle>', view, status, remaining_dependencies, next_refresh_time from refreshes; + truncate src; + insert into src values (3); + system test view a set fake time '2060-02-02 02:02:02';" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != '2062-01-01 00:00:00' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<15: chain-refreshed a>', * from a; + select '<16: chain-refreshed b>', * from b; + select '<17: chain-refreshed>', view, status, next_refresh_time from refreshes;" + +# Get to WaitingForDependencies state and remove the depencency. +$CLICKHOUSE_CLIENT -nq " + system test view b set fake time '2062-03-03 03:03:03'" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'b' -- $LINENO" | xargs`" != 'WaitingForDependencies' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + alter table b modify refresh every 2 year" +while [ "`$CLICKHOUSE_CLIENT -nq "select status, last_refresh_time from refreshes where view = 'b' -- $LINENO" | xargs`" != 'Scheduled 2062-03-03 03:03:03' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<18: removed dependency>', view, status, remaining_dependencies, last_refresh_time,next_refresh_time, refresh_count from refreshes where view = 'b'; + show create b;" + +$CLICKHOUSE_CLIENT -nq " + drop table src; + drop table a; + drop table b; + drop table refreshes;" diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference new file mode 100644 index 00000000000..cdaad32de0a --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.reference @@ -0,0 +1,30 @@ +<19: exception> 1 +<20: unexception> 1 +<21: rename> 1 +<22: rename> d Finished +<23: simple refresh> 1 +<24: rename during refresh> 1 +<25: rename during refresh> f Running +<27: cancelled> f Scheduled Cancelled +<28: drop during refresh> 0 0 +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 AS x +<29: randomize> 1 1 +CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src +<30: to existing table> 10 +<31: to existing table> 10 +<31: to existing table> 20 +<31.5: will retry> Error 1 +<31.6: did retry> 10 +<32: empty> i Scheduled Unknown 0 +<32: empty> j Scheduled Finished 0 +<34: append> 10 +<35: append> 10 +<35: append> 20 +<35: append> 30 +<36: not append> 20 +<36: not append> 30 +<37: append chain> 100 +<38: append chain> 100 +<38: append chain> 100 +<38: append chain> 200 +creating MergeTree without ORDER BY failed, as expected diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh 
b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh new file mode 100755 index 00000000000..50a905576d5 --- /dev/null +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views_2.sh @@ -0,0 +1,222 @@ +#!/usr/bin/env bash +# Tags: atomic-database + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# reset --log_comment +CLICKHOUSE_LOG_COMMENT= +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Set session timezone to UTC to make all DateTime formatting and parsing use UTC, because refresh +# scheduling is done in UTC. +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g'`" +CLICKHOUSE_CLIENT="`echo "$CLICKHOUSE_CLIENT --allow_experimental_refreshable_materialized_view=1 --allow_materialized_view_with_bad_select=0 --session_timezone Etc/UTC"`" + +$CLICKHOUSE_CLIENT -nq "create view refreshes as select * from system.view_refreshes where database = '$CLICKHOUSE_DATABASE' order by view" + + +# Select from a table that doesn't exist, get an exception. +$CLICKHOUSE_CLIENT -nq " + create table src (x Int8) engine Memory as select 1; + create materialized view c refresh every 1 second (x Int64) engine Memory empty as select * from src; + drop table src;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes where view = 'c' -- $LINENO" | xargs`" != 'Error' ] +do + sleep 0.5 +done +# Check exception, create src, expect successful refresh. +$CLICKHOUSE_CLIENT -nq " + select '<19: exception>', exception ilike '%UNKNOWN_TABLE%' ? '1' : exception from refreshes where view = 'c'; + create table src (x Int64) engine Memory as select 1; + system refresh view c;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.5 +done +# Rename table. +$CLICKHOUSE_CLIENT -nq " + select '<20: unexception>', * from c; + rename table c to d; + select '<21: rename>', * from d; + select '<22: rename>', view, last_refresh_result from refreshes;" + +# Do various things during a refresh. +# First make a nonempty view. +$CLICKHOUSE_CLIENT -nq " + drop table d; + truncate src; + insert into src values (1); + create materialized view e refresh every 1 second (x Int64) engine MergeTree order by x empty as select x + sleepEachRow(1) as x from src settings max_block_size = 1;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.5 +done +# Stop refreshes. +$CLICKHOUSE_CLIENT -nq " + select '<23: simple refresh>', * from e; + system stop view e;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Disabled' ] +do + sleep 0.5 +done +# Make refreshes slow, wait for a slow refresh to start. (We stopped refreshes first to make sure +# we wait for a slow refresh, not a previous fast one.) +$CLICKHOUSE_CLIENT -nq " + insert into src select * from numbers(1000) settings max_block_size=1; + system start view e;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes -- $LINENO" | xargs`" != 'Running' ] +do + sleep 0.5 +done +# Rename. +$CLICKHOUSE_CLIENT -nq " + rename table e to f; + select '<24: rename during refresh>', * from f; + select '<25: rename during refresh>', view, status from refreshes where view = 'f'; + alter table f modify refresh after 10 year;" + +# Cancel. 
+$CLICKHOUSE_CLIENT -nq " + system cancel view f;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Scheduled' ] +do + sleep 0.5 +done +# Check that another refresh doesn't immediately start after the cancelled one. +$CLICKHOUSE_CLIENT -nq " + select '<27: cancelled>', view, status, last_refresh_result from refreshes where view = 'f'; + system refresh view f;" +while [ "`$CLICKHOUSE_CLIENT -nq "select status from refreshes where view = 'f' -- $LINENO" | xargs`" != 'Running' ] +do + sleep 0.5 +done +# Drop. +$CLICKHOUSE_CLIENT -nq " + drop table f; + select '<28: drop during refresh>', view, status from refreshes; + select '<28: drop during refresh>', countIf(name like '%tmp%'), countIf(name like '%.inner%') from system.tables where database = currentDatabase()" + +# Try OFFSET and RANDOMIZE FOR. +$CLICKHOUSE_CLIENT -nq " + create materialized view g refresh every 1 week offset 3 day 4 hour randomize for 4 day 1 hour (x Int64) engine Memory empty as select 42 as x; + show create g; + system test view g set fake time '2050-02-03 15:30:13';" +while [ "`$CLICKHOUSE_CLIENT -nq "select next_refresh_time > '2049-01-01' from refreshes -- $LINENO" | xargs`" != '1' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + with '2050-02-10 04:00:00'::DateTime as expected + select '<29: randomize>', abs(next_refresh_time::Int64 - expected::Int64) <= 3600*(24*4+1), next_refresh_time != expected from refreshes;" + +# Send data 'TO' an existing table. +$CLICKHOUSE_CLIENT -nq " + drop table g; + create table dest (x Int64) engine MergeTree order by x; + truncate src; + insert into src values (1); + create materialized view h refresh every 1 second to dest empty as select x*10 as x from src; + show create h;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result from refreshes -- $LINENO" | xargs`" != 'Finished' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<30: to existing table>', * from dest; + insert into src values (2);" +while [ "`$CLICKHOUSE_CLIENT -nq "select count() from dest -- $LINENO" | xargs`" != '2' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<31: to existing table>', * from dest; + drop table dest; + drop table h;" + +# Retries. 
+$CLICKHOUSE_CLIENT -nq " + create materialized view h2 refresh after 1 year settings refresh_retries = 10 (x Int64) engine Memory as select x*10 + throwIf(x % 2 == 0) as x from src;" +$CLICKHOUSE_CLIENT -nq "system wait view h2;" 2>/dev/null && echo "SYSTEM WAIT VIEW failed to fail at $LINENO" +$CLICKHOUSE_CLIENT -nq " + select '<31.5: will retry>', last_refresh_result, retry > 0 from refreshes; + create table src2 (x Int8) engine Memory; + insert into src2 values (1); + exchange tables src and src2; + drop table src2;" +while [ "`$CLICKHOUSE_CLIENT -nq "select last_refresh_result, retry from refreshes -- $LINENO" | xargs`" != 'Finished 0' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<31.6: did retry>', x from h2; + drop table h2" + +# EMPTY +$CLICKHOUSE_CLIENT -nq " + create materialized view i refresh after 1 year engine Memory empty as select number as x from numbers(2); + create materialized view j refresh after 1 year engine Memory as select number as x from numbers(2);" +while [ "`$CLICKHOUSE_CLIENT -nq "select sum(last_success_time is null) from refreshes -- $LINENO" | xargs`" == '2' ] +do + sleep 0.5 +done +$CLICKHOUSE_CLIENT -nq " + select '<32: empty>', view, status, last_refresh_result, retry from refreshes order by view; + drop table i; + drop table j;" + +# APPEND +$CLICKHOUSE_CLIENT -nq " + create materialized view k refresh every 10 year append (x Int64) engine Memory empty as select x*10 as x from src; + select '<33: append>', * from k; + system refresh view k; + system wait view k; + select '<34: append>', * from k; + truncate table src; + insert into src values (2), (3); + system refresh view k; + system wait view k; + select '<35: append>', * from k order by x;" +# ALTER to non-APPEND +$CLICKHOUSE_CLIENT -nq " + alter table k modify refresh every 10 year; + system wait view k; + system refresh view k; + system wait view k; + select '<36: not append>', * from k order by x; + drop table k; + truncate table src;" + +# APPEND + TO + regular materialized view reading from it. +$CLICKHOUSE_CLIENT -nq " + create table mid (x Int64) engine MergeTree order by x; + create materialized view l refresh every 10 year append to mid empty as select x*10 as x from src; + create materialized view m (x Int64) engine Memory as select x*10 as x from mid; + insert into src values (1); + system refresh view l; + system wait view l; + select '<37: append chain>', * from m; + insert into src values (2); + system refresh view l; + system wait view l; + select '<38: append chain>', * from m order by x; + drop table l; + drop table m; + drop table mid;" + +# Failing to create inner table. +$CLICKHOUSE_CLIENT -nq " + create materialized view n refresh every 1 second (x Int64) engine MergeTree as select 1 as x from numbers(2);" 2>/dev/null || echo "creating MergeTree without ORDER BY failed, as expected" +$CLICKHOUSE_CLIENT -nq " + create materialized view n refresh every 1 second (x Int64) engine MergeTree order by x as select 1 as x from numbers(2); + drop table n;" + +# Reading from table that doesn't exist yet. 
+$CLICKHOUSE_CLIENT -nq " + create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } + create materialized view o (x Int64) engine Memory as select x from nonexist; -- { serverError UNKNOWN_TABLE } + create materialized view o (x Int64) engine Memory as select x from nope.nonexist; -- { serverError UNKNOWN_DATABASE } + create materialized view o refresh every 1 second (x Int64) engine Memory as select x from nope.nonexist settings allow_materialized_view_with_bad_select = 1; + drop table o;" + +$CLICKHOUSE_CLIENT -nq " + drop table refreshes;" diff --git a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference index 17a25d82824..0f64d0393b2 100644 --- a/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference +++ b/tests/queries/0_stateless/02933_change_cache_setting_without_restart.reference @@ -1,7 +1,7 @@ -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 10 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 5 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 15 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 2 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 1000 0 16 -134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 10 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 5 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 15 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 2 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 1000 0 16 +134217728 10000000 33554432 1 4194304 1 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02933/ 0 0 0 16 diff --git a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh index b587549cb60..2b78746ae2c 100755 --- a/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh +++ b/tests/queries/0_stateless/02933_replicated_database_forbid_create_as_select.sh @@ -12,15 +12,15 @@ ${CLICKHOUSE_CLIENT} --query "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = # Non-replicated engines are allowed ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test (id UInt64) ENGINE = MergeTree() ORDER BY id AS SELECT 1" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv (id UInt64) ENGINE = MergeTree() ORDER BY id POPULATE AS SELECT 1" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW 
${CLICKHOUSE_DATABASE}_db.test_mv (id UInt64) ENGINE = MergeTree() ORDER BY id POPULATE AS SELECT 1 AS id" # Replicated storafes are forbidden ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" |& grep -cm1 "SUPPORT_IS_DISABLED" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1 AS id" |& grep -cm1 "SUPPORT_IS_DISABLED" # POPULATE is allowed with the special setting -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --database_replicated_allow_heavy_create=1 -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv3 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1" --compatibility='24.6' +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1 AS id" --database_replicated_allow_heavy_create=1 +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE MATERIALIZED VIEW ${CLICKHOUSE_DATABASE}_db.test_mv3 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id POPULATE AS SELECT 1 AS id" --compatibility='24.6' # AS SELECT is forbidden even with the setting ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.test2 (id UInt64) ENGINE = ReplicatedMergeTree ORDER BY id AS SELECT 1" --database_replicated_allow_heavy_create=1 |& grep -cm1 "SUPPORT_IS_DISABLED" diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference index 3455adc8723..f100e8c48d4 100644 --- a/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference +++ b/tests/queries/0_stateless/02935_format_with_arbitrary_types.reference @@ -34,6 +34,7 @@ The answer to all questions is 2023-11-14 05:50:12.123. The answer to all questions is hallo. The answer to all questions is [\'foo\',\'bar\']. The answer to all questions is {"foo":"bar"}. +The answer to all questions is {"foo":"bar"}. The answer to all questions is (42,\'foo\'). The answer to all questions is {42:\'foo\'}. The answer to all questions is 122.233.64.201. 
diff --git a/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql index ad1de2bec6d..dcc3964e4b0 100644 --- a/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql +++ b/tests/queries/0_stateless/02935_format_with_arbitrary_types.sql @@ -3,6 +3,7 @@ -- no-fasttest: json type needs rapidjson library, geo types need s2 geometry SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SET allow_suspicious_low_cardinality_types=1; SELECT '-- Const string + non-const arbitrary type'; @@ -40,6 +41,7 @@ SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11 SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize('hallo' :: Enum('hallo' = 1))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize(['foo', 'bar'] :: Array(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: Object('json'))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: JSON)); SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 'foo') :: Tuple(Int32, String))); SELECT format('The {0} to all questions is {1}.', 'answer', materialize(map(42, 'foo') :: Map(Int32, String))); diff --git a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference index 298cc908178..c6bbcdc20c2 100644 --- a/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference +++ b/tests/queries/0_stateless/02944_dynamically_change_filesystem_cache_size.reference @@ -1,20 +1,20 @@ -100 10 10 10 0 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 0 0 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 0 10 98 set max_size from 100 to 10 -10 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +10 10 10 1 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 1 8 set max_size from 10 to 100 -100 10 10 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 8 1 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 10 98 set max_elements from 10 to 2 -100 2 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 2 10 1 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 2 18 set max_elements from 2 to 10 -100 10 10 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 +100 10 10 1 10 0 0 18 2 /var/lib/clickhouse/filesystem_caches/s3_cache_02944/ 0 5000 0 16 10 98 diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh index e954cb0e78e..c4b44ce11c5 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_3.sh @@ -30,7 +30,7 @@ SETTINGS enable_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 
'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=0) r @@ -55,7 +55,7 @@ SETTINGS enable_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r @@ -81,7 +81,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r @@ -106,7 +106,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='full_sorting_merge'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='hash') r @@ -131,7 +131,7 @@ SETTINGS enable_analyzer=1, parallel_replicas_prefer_local_join=0, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', join_algorithm='hash'" -$CLICKHOUSE_CLIENT -q " +$CLICKHOUSE_CLIENT --max_rows_in_set_to_optimize_join 0 -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings join_algorithm='full_sorting_merge') r diff --git a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference index 5555c918500..3e63763d544 100644 --- a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference +++ b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.reference @@ -7,7 +7,7 @@ Decimal 45 Decimal(10, 0) Decimal(M) 46 Decimal(4, 0) Decimal(M, D) 47.21 Decimal(4, 2) Double 48.11 Float64 -JSON {"foo":"bar"} Object(\'json\') +JSON {"foo":"bar"} JSON Real 49.22 Float32 Signed 50 Int64 Unsigned 52 UInt64 @@ -21,7 +21,7 @@ Decimal 45 Decimal(10, 0) Decimal(M) 
46 Decimal(4, 0) Decimal(M, D) 47.21 Decimal(4, 2) Double 48.11 Float64 -JSON {"foo":"bar"} Object(\'json\') +JSON {"foo":"bar"} JSON Real 49.22 Float32 Signed 50 Int64 Unsigned 52 UInt64 diff --git a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql index 7b5735cdebc..8cccde4b0ab 100644 --- a/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql +++ b/tests/queries/0_stateless/02969_mysql_cast_type_aliases.sql @@ -1,7 +1,7 @@ -- See https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast -- Tests are in order of the type appearance in the docs -SET allow_experimental_object_type = 1; +SET allow_experimental_json_type = 1; SELECT '-- Uppercase tests'; -- Not supported as it is translated to FixedString without arguments diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh index 813cc49cbd8..e7e7d3c3b42 100755 --- a/tests/queries/0_stateless/02995_index_10.sh +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -37,8 +37,9 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String -HAVING count() > 0; -" +HAVING count() > 0 +SETTINGS trace_profile_events=0 -- test is too slow with profiling +;" done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} "DROP TABLE test" +${CLICKHOUSE_CLIENT} -q "DROP TABLE test" diff --git a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql index 8ccc3cf61da..6714a069246 100644 --- a/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql +++ b/tests/queries/0_stateless/03002_filter_skip_virtual_columns_with_non_deterministic_functions.sql @@ -1,3 +1,5 @@ +-- Tags: long +SET max_rows_to_read = 0; create table test (number UInt64) engine=MergeTree order by number; insert into test select * from numbers(50000000); select ignore(number) from test where RAND() > 4292390314 limit 10; diff --git a/tests/queries/0_stateless/03015_peder1001.sql b/tests/queries/0_stateless/03015_peder1001.sql index 810503207f2..df8e4db1536 100644 --- a/tests/queries/0_stateless/03015_peder1001.sql +++ b/tests/queries/0_stateless/03015_peder1001.sql @@ -1,3 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + DROP TABLE IF EXISTS test_data; CREATE TABLE test_data diff --git a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference index 9fc356cc5e6..f949d5e9baf 100644 --- a/tests/queries/0_stateless/03033_dynamic_text_serialization.reference +++ b/tests/queries/0_stateless/03033_dynamic_text_serialization.reference @@ -4,7 +4,7 @@ JSON {"d":"str","dynamicType(d)":"String"} {"d":["1","2","3"],"dynamicType(d)":"Array(Int64)"} 
{"d":"2020-01-01","dynamicType(d)":"Date"} -{"d":"2020-01-01 10:00:00.000000000","dynamicType(d)":"DateTime64(9)"} +{"d":"2020-01-01 10:00:00","dynamicType(d)":"DateTime"} {"d":{"a":"42","b":"str"},"dynamicType(d)":"Tuple(a Int64, b String)"} {"d":{"a":"43"},"dynamicType(d)":"Tuple(a Int64)"} {"d":{"a":"44","c":["1","2","3"]},"dynamicType(d)":"Tuple(a Int64, c Array(Int64))"} @@ -22,7 +22,7 @@ CSV "str","String" "[1,2,3]","Array(Int64)" "2020-01-01","Date" -"2020-01-01 10:00:00.000000000","DateTime64(9)" +"2020-01-01 10:00:00","DateTime" "[1, 'str', [1, 2, 3]]","String" \N,"None" true,"Bool" @@ -32,24 +32,24 @@ TSV str String [1,2,3] Array(Int64) 2020-01-01 Date -2020-01-01 10:00:00.000000000 DateTime64(9) +2020-01-01 10:00:00 DateTime [1, \'str\', [1, 2, 3]] String \N None true Bool Values -(42,'Int64'),(42.42,'Float64'),('str','String'),([1,2,3],'Array(Int64)'),('2020-01-01','Date'),('2020-01-01 10:00:00.000000000','DateTime64(9)'),(NULL,'None'),(true,'Bool') +(42,'Int64'),(42.42,'Float64'),('str','String'),([1,2,3],'Array(Int64)'),('2020-01-01','Date'),('2020-01-01 10:00:00','DateTime'),(NULL,'None'),(true,'Bool') Cast using parsing 42 Int64 42.42 Float64 [1,2,3] Array(Int64) 2020-01-01 Date -2020-01-01 10:00:00.000000000 DateTime64(9) +2020-01-01 10:00:00 DateTime NULL String true Bool 42 Int64 false 42.42 Float64 false [1,2,3] Array(Int64) false 2020-01-01 Date true -2020-01-01 10:00:00.000000000 DateTime64(9) true +2020-01-01 10:00:00 DateTime true NULL String true true Bool true diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql index ddfba4418bd..822393d3c78 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_compact_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql index 5aac5f7b72f..2394893dc8b 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_wide_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql index fb23e15738e..9bd2aee06ae 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql @@ -1,4 +1,6 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) + set allow_experimental_dynamic_type=1; drop table if exists test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql index c098a3191e0..ee2dadd308c 100644 --- 
a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql @@ -1,4 +1,6 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) + set allow_experimental_dynamic_type=1; drop table if exists test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql index 17b1e451143..6c2ce8f9e6a 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql @@ -1,4 +1,6 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) + set allow_experimental_dynamic_type=1; drop table if exists test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql index fd6c0109263..2350cddd21c 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql @@ -1,4 +1,6 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) + set allow_experimental_dynamic_type=1; drop table if exists test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql index fa64ed2f8fd..7f1934091f2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_compact_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_dynamic_type = 1; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql index 4b8a036f166..f1f387fae9d 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_horizontal_wide_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_dynamic_type = 1; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql index a4e67de76db..cc11c454d38 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_compact_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_dynamic_type = 1; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql 
b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql index dd643f8dffd..ffb2aca8b35 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_2_vertical_wide_merge_tree.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_dynamic_type = 1; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 index 3778399d0a4..71d6fc2540c 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 +++ b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 @@ -2,6 +2,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; + drop table if exists test; {% for engine in ['MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000', diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql index 8ba192cb5db..e3b8ea63582 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql index 1ea7eefdd53..db11dfc93e2 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql index c6a09036c30..4ed4d00fe95 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql index c1964c45d98..2f8b258ba8e 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -1,4 +1,5 @@ -- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None) set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.reference 
b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference index 2850a8aba98..ae7f7c805f2 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.reference +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.reference @@ -1,2 +1,2 @@ -49999983751397 10000032 -49999983751397 10000032 +7999995751397 4000032 +7999995751397 4000032 diff --git a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql index 4b211a6a1e1..c93e6618ba9 100644 --- a/tests/queries/0_stateless/03143_asof_join_ddb_long.sql +++ b/tests/queries/0_stateless/03143_asof_join_ddb_long.sql @@ -12,7 +12,7 @@ AS toDateTime('1990-03-21 13:00:00') + INTERVAL number MINUTE AS begin, number % 4 AS key, number AS value - FROM numbers(0, 10000000); + FROM numbers(0, 4000000); CREATE TABLE skewed_probe ENGINE = MergeTree ORDER BY (key, begin) AS @@ -34,8 +34,9 @@ AS SELECT toDateTime('1990-03-21 13:00:01') + INTERVAL number MINUTE AS begin, 3 AS key - FROM numbers(0, 10000000); + FROM numbers(0, 4000000); +SET max_rows_to_read = 0; SELECT SUM(value), COUNT(*) FROM skewed_probe diff --git a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh index 6f70a0d2536..7f606d889a7 100755 --- a/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh +++ b/tests/queries/0_stateless/03149_numbers_max_block_size_zero.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash +# shellcheck disable=SC2266 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | grep -q "Sanity check: 'max_block_size' cannot be 0. Set to default value" && echo "OK" || echo "FAIL" +$CLICKHOUSE_CLIENT -q "SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0" 2>&1 | + [ "$(grep -c "Sanity check: 'max_block_size' cannot be 0. Set to default value")" -gt 0 ] && echo "OK" || echo "FAIL" diff --git a/tests/queries/0_stateless/03150_infer_type_variant.reference b/tests/queries/0_stateless/03150_infer_type_variant.reference new file mode 100644 index 00000000000..a43fa1e1227 --- /dev/null +++ b/tests/queries/0_stateless/03150_infer_type_variant.reference @@ -0,0 +1,31 @@ + ┏━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + ┃ arr ┃ toTypeName(arr) ┃ + ┡━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +1. │ ['1','Hello',(32)] │ Array(Variant(String, Tuple( + a Nullable(Int64)))) │ + └────────────────────┴──────────────────────────────────────────────────────┘ + ┏━━━━━━━┳━━━━━━━━━━━━━━━━━━┓ + ┃ x ┃ toTypeName(x) ┃ + ┡━━━━━━━╇━━━━━━━━━━━━━━━━━━┩ +1. │ 42 │ Nullable(String) │ + ├───────┼──────────────────┤ +2. │ Hello │ Nullable(String) │ + └───────┴──────────────────┘ + ┏━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + ┃ x ┃ toTypeName(x) ┃ + ┡━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +1. │ [1,2,3] │ Variant(Array(Nullable(Int64)), Tuple( + a Nullable(Int64))) │ + ├─────────┼───────────────────────────────────────────────────────────────┤ +2. 
│ (42) │ Variant(Array(Nullable(Int64)), Tuple( + a Nullable(Int64))) │ + └─────────┴───────────────────────────────────────────────────────────────┘ + ┏━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + ┃ c1 ┃ toTypeName(c1) ┃ c2 ┃ toTypeName(c2) ┃ + ┡━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +1. │ 1 │ Nullable(Int64) │ Hello World! │ Variant(Array(Nullable(Int64)), String) │ + ├────┼─────────────────┼──────────────┼─────────────────────────────────────────┤ +2. │ 2 │ Nullable(Int64) │ [1,2,3] │ Variant(Array(Nullable(Int64)), String) │ + ├────┼─────────────────┼──────────────┼─────────────────────────────────────────┤ +3. │ 3 │ Nullable(Int64) │ 2020-01-01 │ Variant(Array(Nullable(Int64)), String) │ + └────┴─────────────────┴──────────────┴─────────────────────────────────────────┘ diff --git a/tests/queries/0_stateless/03150_infer_type_variant.sql b/tests/queries/0_stateless/03150_infer_type_variant.sql new file mode 100644 index 00000000000..45126ccd471 --- /dev/null +++ b/tests/queries/0_stateless/03150_infer_type_variant.sql @@ -0,0 +1,5 @@ +SET input_format_try_infer_variants=1; +SELECT arr, toTypeName(arr) FROM format('JSONEachRow', '{"arr" : [1, "Hello", {"a" : 32}]}') FORMAT Pretty; +SELECT x, toTypeName(x) FROM format('JSONEachRow', '{"x" : 42}, {"x" : "Hello"}') FORMAT Pretty; +SELECT x, toTypeName(x) FROM format('JSONEachRow', '{"x" : [1, 2, 3]}, {"x" : {"a" : 42}}') FORMAT Pretty; +SELECT c1, toTypeName(c1), c2, toTypeName(c2) FROM format('CSV', '1,Hello World!\n2,"[1,2,3]"\n3,"2020-01-01"\n') FORMAT Pretty; \ No newline at end of file diff --git a/tests/queries/0_stateless/03157_dynamic_type_json.reference b/tests/queries/0_stateless/03157_dynamic_type_json.reference index 38bca12bb95..14e851bdbc7 100644 --- a/tests/queries/0_stateless/03157_dynamic_type_json.reference +++ b/tests/queries/0_stateless/03157_dynamic_type_json.reference @@ -1,5 +1,5 @@ -1 (((((((((('deep_value')))))))))) -2 (((((((((('deep_array_value')))))))))) +1 {"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_value"}}}}}}}}}} +2 {"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_array_value"}}}}}}}}}} -(((((((((('deep_value')))))))))) Tuple(level1 Tuple(level2 Tuple(level3 Tuple(level4 Tuple(level5 Tuple(level6 Tuple(level7 Tuple(level8 Tuple(level9 Tuple(level10 String)))))))))) -(((((((((('deep_array_value')))))))))) Tuple(level1 Tuple(level2 Tuple(level3 Tuple(level4 Tuple(level5 Tuple(level6 Tuple(level7 Tuple(level8 Tuple(level9 Tuple(level10 String)))))))))) +{"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_value"}}}}}}}}}} JSON +{"level1":{"level2":{"level3":{"level4":{"level5":{"level6":{"level7":{"level8":{"level9":{"level10":"deep_array_value"}}}}}}}}}} JSON diff --git a/tests/queries/0_stateless/03157_dynamic_type_json.sql b/tests/queries/0_stateless/03157_dynamic_type_json.sql index cb1a5987104..91af7942718 100644 --- a/tests/queries/0_stateless/03157_dynamic_type_json.sql +++ b/tests/queries/0_stateless/03157_dynamic_type_json.sql @@ -1,7 +1,8 @@ SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; +SET allow_experimental_json_type=1; SET allow_experimental_variant_type=1; +DROP TABLE IF EXISTS test_deep_nested_json; CREATE TABLE test_deep_nested_json (i UInt16, d JSON) ENGINE = Memory; INSERT INTO test_deep_nested_json VALUES (1, 
'{"level1": {"level2": {"level3": {"level4": {"level5": {"level6": {"level7": {"level8": {"level9": {"level10": "deep_value"}}}}}}}}}}'); @@ -11,3 +12,4 @@ SELECT * FROM test_deep_nested_json ORDER BY i; SELECT ''; SELECT d::Dynamic d1, dynamicType(d1) FROM test_deep_nested_json ORDER BY i; +DROP TABLE test_deep_nested_json; diff --git a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql index 20a9e17a148..a18f985f217 100644 --- a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql +++ b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql @@ -1,5 +1,4 @@ SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; SET allow_experimental_variant_type=1; CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index fffea1bd0f5..28b679e2214 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -1,7 +1,6 @@ -- Tags: no-random-settings SET allow_experimental_dynamic_type=1; -SET allow_experimental_object_type=1; SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index c5a6cbab0bc..8edf541c2a0 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,2 +1,90 @@ -1231 John 33 +compact part +testing throw default mode +-- { echoOn } + +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +SELECT 'testing drop mode'; +testing drop mode +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_compact WHERE uid = 1231; +SELECT * FROM users_compact ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +SELECT 'testing rebuild mode'; +testing rebuild mode +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_compact WHERE uid = 6666; +SELECT * FROM users_compact ORDER BY uid; 8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +all_1_1_0_4 +all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; +p1 all_3_3_0_4 +p2 all_3_3_0_4 +wide part +testing throw default mode +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } 
+SELECT 'testing drop mode'; +testing drop mode +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_wide WHERE uid = 1231; +SELECT * FROM users_wide ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +SELECT 'testing rebuild mode'; +testing rebuild mode +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_wide WHERE uid = 6666; +SELECT * FROM users_wide ORDER BY uid; +8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +all_1_1_0_4 +all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; +p1 all_3_3_0_4 +p2 all_3_3_0_4 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index b189388e356..da6427cbf22 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,31 +1,145 @@ +-- For cloud version, should also consider min_bytes_for_full_part_storage since packed storage exists, +-- but for less redundancy, just let CI test the parameter. 
-DROP TABLE IF EXISTS users; +SET lightweight_deletes_sync = 2, alter_sync = 2; -CREATE TABLE users ( +DROP TABLE IF EXISTS users_compact; + + +SELECT 'compact part'; + +CREATE TABLE users_compact ( uid Int16, name String, age Int16, projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) -) ENGINE = MergeTree order by uid; +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 10485760; -INSERT INTO users VALUES (1231, 'John', 33); -INSERT INTO users VALUES (6666, 'Ksenia', 48); -INSERT INTO users VALUES (8888, 'Alice', 50); +INSERT INTO users_compact VALUES (1231, 'John', 33); -DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } +SELECT 'testing throw default mode'; -DELETE FROM users WHERE uid = 8888 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +-- { echoOn } -DELETE FROM users WHERE uid = 6666 SETTINGS lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_compact WHERE uid = 1231; + +SELECT * FROM users_compact ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -- expecting no projection SELECT - name, - `table` + name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users'); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -SELECT * FROM users ORDER BY uid; +SELECT 'testing rebuild mode'; +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); -DROP TABLE users; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_compact WHERE uid = 6666; + +SELECT * FROM users_compact ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_compact; + + +SELECT 'wide part'; +CREATE TABLE users_wide ( + uid Int16, + name String, + age Int16, + projection p1 (select count(), age group by age), + projection p2 (select age, name group by age, name) +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO users_wide VALUES (1231, 'John', 33); + +SELECT 'testing throw default mode'; + +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_wide WHERE uid = 1231; + +SELECT * FROM users_wide ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name +FROM 
system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +SELECT 'testing rebuild mode'; +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_wide WHERE uid = 6666; + +SELECT * FROM users_wide ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_wide; diff --git a/tests/queries/0_stateless/03170_ecs_crash.reference b/tests/queries/0_stateless/03170_ecs_crash.reference new file mode 100644 index 00000000000..acd7c60768b --- /dev/null +++ b/tests/queries/0_stateless/03170_ecs_crash.reference @@ -0,0 +1,4 @@ +1 2 3 +4 5 6 +7 8 9 +0 0 0 diff --git a/tests/queries/0_stateless/03170_ecs_crash.sh b/tests/queries/0_stateless/03170_ecs_crash.sh new file mode 100755 index 00000000000..fa6870c4cf2 --- /dev/null +++ b/tests/queries/0_stateless/03170_ecs_crash.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Previous versions crashed in attempt to use this authentication method (regardless of whether it was able to authenticate): +AWS_CONTAINER_CREDENTIALS_FULL_URI=http://localhost:1338/latest/meta-data/container/security-credentials $CLICKHOUSE_LOCAL -q "select * from s3('http://localhost:11111/test/a.tsv')" diff --git a/tests/queries/0_stateless/03199_json_extract_dynamic.reference b/tests/queries/0_stateless/03199_json_extract_dynamic.reference index 759b7763cd1..955106946ea 100644 --- a/tests/queries/0_stateless/03199_json_extract_dynamic.reference +++ b/tests/queries/0_stateless/03199_json_extract_dynamic.reference @@ -12,7 +12,7 @@ Hello String [1,2,3] Array(Nullable(Int64)) ['str1','str2','str3'] Array(Nullable(String)) [[[1],[2,3,4]],[[5,6],[7]]] Array(Array(Array(Nullable(Int64)))) -['2020-01-01 00:00:00.000000000','2020-01-01 00:00:00.000000000'] Array(Nullable(DateTime64(9))) +['2020-01-01 00:00:00','2020-01-01 00:00:00'] Array(Nullable(DateTime)) ['2020-01-01','2020-01-01 date'] Array(Nullable(String)) ['2020-01-01','2020-01-01 00:00:00','str'] Array(Nullable(String)) ['2020-01-01','2020-01-01 00:00:00','42'] Array(Nullable(String)) diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.reference b/tests/queries/0_stateless/03205_json_cast_from_string.reference new file mode 100644 index 00000000000..b9ac477eef4 --- /dev/null +++ b/tests/queries/0_stateless/03205_json_cast_from_string.reference @@ -0,0 +1,18 @@ +{} +{"a":"42","b":"Hello"} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} +{"a":{"b":{"c":{"d":true},"e":"43"},"f":"44"},"g":"44"} +{"a":{"b":{"e":"43"},"f":"44"},"g":"44"} +{"a":{"b":{"e":"43"},"f":"44"},"g":"44"} +{"a":{"f":"44"},"g":"44"} +{"g":"44"} +{"a":{"f":"44"},"g":"44"} +{"g":"44"} +{} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} 
{'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64'} {'a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64','a.b.e':'Int64'} {'a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {'a.b.c.d':'Int64'} {'a.b.e':'Int64','a.f':'Int64','g':'Int64'} +{"a":{"b":{"c":{"d":"42"},"e":"43"},"f":"44"},"g":"44"} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} {} {'a.b.c.d':'Int64','a.b.e':'Int64','a.f':'Int64','g':'Int64'} diff --git a/tests/queries/0_stateless/03205_json_cast_from_string.sql b/tests/queries/0_stateless/03205_json_cast_from_string.sql new file mode 100644 index 00000000000..5ceee134c51 --- /dev/null +++ b/tests/queries/0_stateless/03205_json_cast_from_string.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest +set allow_experimental_json_type=1; + +select materialize('{}')::JSON; +select materialize('{"a" : 42, "b" : "Hello"}')::JSON; +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON; +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(a.b.c.d Bool); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c.d); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*b'); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*'); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*'); + +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), 
JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); +select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json); diff --git a/tests/queries/0_stateless/03205_json_syntax.reference b/tests/queries/0_stateless/03205_json_syntax.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03205_json_syntax.sql b/tests/queries/0_stateless/03205_json_syntax.sql new file mode 100644 index 00000000000..e3c88c81d0d --- /dev/null +++ b/tests/queries/0_stateless/03205_json_syntax.sql @@ -0,0 +1,40 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type=1; +drop table if exists test; +create table test (json JSON) engine=Memory; +drop table test; +create table test (json JSON(max_dynamic_paths=10)) engine=Memory; +drop table test; +create table test (json JSON(max_dynamic_types=10)) engine=Memory; +drop table test; +create table test (json JSON(a UInt32)) engine=Memory; +drop table test; +create table test (json JSON(aaaaa UInt32)) engine=Memory; +drop table test; +create table test (json JSON(`a b c d` UInt32)) engine=Memory; +drop table test; +create table test (json JSON(a.b.c UInt32)) engine=Memory; +drop table test; +create table test (json JSON(aaaa.b.cccc UInt32)) engine=Memory; +drop table test; +create table test (json JSON(`some path`.`path some` UInt32)) engine=Memory; +drop table test; +create table test (json JSON(a.b.c Tuple(d UInt32, e UInt32))) engine=Memory; +drop table test; +create table test (json JSON(SKIP a)) engine=Memory; +drop table test; +create table test (json JSON(SKIP aaaa)) engine=Memory; +drop table test; +create table test (json JSON(SKIP `a b c d`)) engine=Memory; +drop table test; +create table test (json JSON(SKIP a.b.c)) engine=Memory; +drop table test; +create table test (json JSON(SKIP aaaa.b.cccc)) engine=Memory; +drop table test; +create table test (json JSON(SKIP `some path`.`path some`)) engine=Memory; +drop table test; +create table test (json JSON(SKIP REGEXP '.*a.*')) engine=Memory; +drop table test; +create table test (json JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, b.c.d String, SKIP g.d.a, SKIP o.g.a, SKIP REGEXP '.*u.*', SKIP REGEXP 'abc')) engine=Memory; +drop table test; diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.reference b/tests/queries/0_stateless/03205_system_sync_replica_format.reference new file mode 100644 index 00000000000..aad51dd90b0 --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.reference @@ -0,0 +1 @@ +SYSTEM SYNC REPLICA db.`table` LIGHTWEIGHT diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.sql b/tests/queries/0_stateless/03205_system_sync_replica_format.sql new file mode 100644 index 00000000000..329bce80afc --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.sql @@ -0,0 +1 @@ +SELECT formatQuery('SYSTEM SYNC REPLICA db.table LIGHTWEIGHT'); diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference new file mode 100644 index 00000000000..75e55e0376d --- /dev/null 
+++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.reference @@ -0,0 +1,195 @@ +JSON with no arguments +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','e':'String'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} +{'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.c':'Int64','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} +1 2020-01-01 {"e":{"f":["s1","s2"]}} +2 [1,2,3] {"e":{"g":"43"}} +3 \N {} +4 \N {} +5 ['b1','b2'] {"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}} +JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP '.*h.*') +{"a":{"b":{"c":1,"d":[false,true]}},"b":"2020-01-01"} +{"a":{"b":{"c":2,"d":[true,true]}},"b":["1","2","3"]} +{"a":{"b":{"c":3,"d":[true,true]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":[true,true]}}} +{"a":{"b":{"c":5,"d":[true,true]}},"b":["b1","b2"]} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Date'} {'b':'Date'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Array(Nullable(Int64))'} {'b':'Array(Nullable(Int64))'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','e':'String'} {'e':'String'} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))'} {} {} +{'a.b':'Tuple(c UInt32, d Array(Bool))','b':'Array(Nullable(String))'} {'b':'Array(Nullable(String))'} {} +JSON(a.b.c UInt32, max_dynamic_paths=2) +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date'} {'c':'Int64','d.e.f':'Array(Nullable(String))'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))'} {'d.e.g':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))'} {'e':'String'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))'} {'c':'Int64'} 
+{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))'} {'d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} +JSON(a.b.c UInt32, max_dynamic_paths=0) +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {} {'a.b.d':'Array(Nullable(Int64))','e':'String'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} +JSON(a.b.c UInt32, max_dynamic_types=1) +{"a":{"b":{"c":1,"d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":2,"d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":3,"d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":4,"d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":5,"d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {'a.b.d':'Array(Nullable(Int64))','b':'Date','c':'Int64','d.e.f':'Array(Nullable(String))'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(Int64))','d.e.g':'Int64'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','e':'String'} {'a.b.d':'Array(Nullable(Int64))','e':'String'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','c':'Int64'} {'a.b.d':'Array(Nullable(Int64))','c':'Int64'} {} +{'a.b.c':'UInt32','a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {'a.b.d':'Array(Nullable(Int64))','b':'Array(Nullable(String))','d.e.f':'Array(Nullable(String))','d.e.g':'Int64','d.e.h':'DateTime'} {} +Test small max_read_buffer_size +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} 
+{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +Test PrettyJSONEachRow +{ + "json": { + "a" : { + "b" : { + "c" : "1", + "d" : [ + "0", + "1" + ] + } + }, + "b" : "2020-01-01", + "c" : "42", + "d" : { + "e" : { + "f" : [ + "s1", + "s2" + ] + } + } + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "2", + "d" : [ + "2", + "3" + ] + } + }, + "b" : [ + "1", + "2", + "3" + ], + "d" : { + "e" : { + "g" : "43" + } + } + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "3", + "d" : [ + "4", + "5" + ] + } + }, + "e" : "Hello, World!" + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "4", + "d" : [ + "6", + "7" + ] + } + }, + "c" : "43" + } +} +{ + "json": { + "a" : { + "b" : { + "c" : "5", + "d" : [ + "8", + "9" + ] + } + }, + "b" : [ + "b1", + "b2" + ], + "d" : { + "e" : { + "f" : [ + "s3", + "s4" + ], + "g" : "44", + "h" : "2020-02-02 10:00:00" + } + } + } +} +Test TSV +{"a":{"b":{"c":"1","d":["0","1"]}},"b":"2020-01-01","c":"42","d":{"e":{"f":["s1","s2"]}}} +{"a":{"b":{"c":"2","d":["2","3"]}},"b":["1","2","3"],"d":{"e":{"g":"43"}}} +{"a":{"b":{"c":"3","d":["4","5"]}},"e":"Hello, World!"} +{"a":{"b":{"c":"4","d":["6","7"]}},"c":"43"} +{"a":{"b":{"c":"5","d":["8","9"]}},"b":["b1","b2"],"d":{"e":{"f":["s3","s4"],"g":"44","h":"2020-02-02 10:00:00"}}} +Test CSV +"{""a"":{""b"":{""c"":""1"",""d"":[""0"",""1""]}},""b"":""2020-01-01"",""c"":""42"",""d"":{""e"":{""f"":[""s1"",""s2""]}}}" +"{""a"":{""b"":{""c"":""2"",""d"":[""2"",""3""]}},""b"":[""1"",""2"",""3""],""d"":{""e"":{""g"":""43""}}}" +"{""a"":{""b"":{""c"":""3"",""d"":[""4"",""5""]}},""e"":""Hello, World!""}" +"{""a"":{""b"":{""c"":""4"",""d"":[""6"",""7""]}},""c"":""43""}" +"{""a"":{""b"":{""c"":""5"",""d"":[""8"",""9""]}},""b"":[""b1"",""b2""],""d"":{""e"":{""f"":[""s3"",""s4""],""g"":""44"",""h"":""2020-02-02 10:00:00""}}}" diff --git a/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh new file mode 100755 index 00000000000..7e53e4388ec --- /dev/null +++ b/tests/queries/0_stateless/03206_json_parsing_and_formatting.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +DATA_FILE=$CLICKHOUSE_TEST_UNIQUE_NAME.json + +echo '{"a" : {"b" : {"c" : 1, "d" : [0, 1]}}, "b" : "2020-01-01", "c" : 42, "d" : {"e" : {"f" : ["s1", "s2"]}}} +{"a" : {"b" : {"c" : 2, "d" : [2, 3]}}, "b" : [1, 2, 3], "c" : null, "d" : {"e" : {"g" : 43}}} +{"a" : {"b" : {"c" : 3, "d" : [4, 5]}}, "e" : "Hello, World!"} +{"a" : {"b" : {"c" : 4, "d" : [6, 7]}}, "c" : 43} +{"a" : {"b" : {"c" : 5, "d" : [8, 9]}}, "b" : ["b1", "b2"], "d" : {"e" : {"f" : ["s3", "s4"], "g" : 44, "h" : "2020-02-02 10:00:00"}}}' > $DATA_FILE + +echo "JSON with no arguments" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json.a.b.c, json.b, json.^d from file($DATA_FILE, JSONAsObject)" + +echo "JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP '.*h.*')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP \'.*h.*\')')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b Tuple(c UInt32, d Array(Bool)), SKIP d.e, SKIP c, SKIP REGEXP \'.*h.*\')')" + +echo "JSON(a.b.c UInt32, max_dynamic_paths=2)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=2)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=2)')" + +echo "JSON(a.b.c UInt32, max_dynamic_paths=0)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=0)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_paths=0)')" + +echo "JSON(a.b.c UInt32, max_dynamic_types=1)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=0)')" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json) from file($DATA_FILE, JSONAsObject, 'json JSON(a.b.c UInt32, max_dynamic_types=0)')" + +echo "Test small max_read_buffer_size" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=1 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=2 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=3 -q "select json from file($DATA_FILE, JSONAsObject)" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 --max_read_buffer_size=4 -q "select json from file($DATA_FILE, JSONAsObject)" + +echo "Test PrettyJSONEachRow" +$CLICKHOUSE_LOCAL 
--allow_experimental_json_type=1 -q "select json from file($DATA_FILE, JSONAsObject) format PrettyJSONEachRow" + +echo "Test TSV" +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, TSV, 'json JSON') format TSV" +echo "Test CSV" +echo '"{""a"" : {""b"" : {""c"" : 1, ""d"" : [0, 1]}}, ""b"" : ""2020-01-01"", ""c"" : 42, ""d"" : {""e"" : {""f"" : [""s1"", ""s2""]}}}" +"{""a"" : {""b"" : {""c"" : 2, ""d"" : [2, 3]}}, ""b"" : [1, 2, 3], ""c"" : null, ""d"" : {""e"" : {""g"" : 43}}}" +"{""a"" : {""b"" : {""c"" : 3, ""d"" : [4, 5]}}, ""e"" : ""Hello, World!""}" +"{""a"" : {""b"" : {""c"" : 4, ""d"" : [6, 7]}}, ""c"" : 43}" +"{""a"" : {""b"" : {""c"" : 5, ""d"" : [8, 9]}}, ""b"" : [""b1"", ""b2""], ""d"" : {""e"" : {""f"" : [""s3"", ""s4""], ""g"" : 44, ""h"" : ""2020-02-02 10:00:00""}}}"' > $DATA_FILE +$CLICKHOUSE_LOCAL --allow_experimental_json_type=1 -q "select json from file($DATA_FILE, CSV, 'json JSON') format CSV" + +rm $DATA_FILE diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 new file mode 100644 index 00000000000..a93a2259442 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.reference.j2 @@ -0,0 +1,826 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", 
"str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", 
"37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, 
{"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, 
null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, 
{"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], 
["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], 
["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, 
null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + 
"json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ 
+ "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, 
null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, 
{"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 
00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, 
{"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, 
{"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, 
{"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, 
{"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, 
{"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + 
"json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..0ec1a86372b --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_compact_merge_tree.sql.j2 @@ -0,0 +1,93 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 
5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test 
order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, 
json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference new file mode 100644 index 00000000000..6276be52c0d --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.reference @@ -0,0 +1,413 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, 
{"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 
00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, 
{"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, 
{"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", 
["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, 
{"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": 
[{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, 
{"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, 
{"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, 
{"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, 
{"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, 
{"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, 
{"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + 
"json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql new file mode 100644 index 00000000000..51e6970759d --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_memory.sql @@ -0,0 +1,87 @@ +-- Tags: no-fasttest, long +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type=1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test 
order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; 
+select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +drop table test; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 new file mode 100644 index 00000000000..a93a2259442 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.reference.j2 @@ -0,0 +1,826 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') 
+('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, 
null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, 
{"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, 
null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, 
{"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, 
{"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", 
"str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", 
"1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, 
{"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + 
"json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, 
{"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, 
{"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, 
{"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, 
{"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, 
{"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": 
[[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, 
{"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, 
{"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, 
{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, 
{"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, 
{"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._25','Int64') +('b.b._26','Int64') +('b.b._27','Int64') +('b.b._28','Int64') +('b.b._29','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, 
"20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, {"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, 
{"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.e": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, "str_10", "str_11", "str_12", "str_13", "str_14", null, null, null, null, null, "str_20", "str_21", "str_22", "str_23", "str_24", "str_25", "str_26", "str_27", "str_28", "str_29", "str_30", "str_31", "str_32", "str_33", "str_34", "str_35", "str_36", "str_37", "str_38", "str_39"], + "json.a.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "25", null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._25.:`UUID`": [null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "27", null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._27.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "28", null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._28.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._29": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "29", null, null, null, null, null, null, null, null, null, null], + "json.b.b._29.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "15", "16", "17", "18", "19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.d.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.c": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, "1970-01-31", "1970-02-01", "1970-02-02", "1970-02-03", "1970-02-04", null, null, null, null, null], + "json.d.c.:`UUID`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.^`n`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.^`a`.b": [{"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":5}, {"c":6}, {"c":7}, {"c":8}, {"c":9}, {"c":0,"d":"10","e":"str_10"}, {"c":0,"d":"11","e":"str_11"}, {"c":0,"d":"12","e":"str_12"}, {"c":0,"d":"13","e":"str_13"}, {"c":0,"d":"14","e":"str_14"}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":0}, {"c":20,"d":"20","e":"str_20"}, {"c":21,"d":"21","e":"str_21"}, {"c":22,"d":"22","e":"str_22"}, {"c":23,"d":"23","e":"str_23"}, {"c":24,"d":"24","e":"str_24"}, {"c":25,"d":"25","e":"str_25"}, {"c":26,"d":"26","e":"str_26"}, {"c":27,"d":"27","e":"str_27"}, {"c":28,"d":"28","e":"str_28"}, {"c":29,"d":"29","e":"str_29"}, {"c":30,"d":[],"e":"str_30"}, {"c":31,"d":[],"e":"str_31"}, {"c":32,"d":[],"e":"str_32"}, {"c":33,"d":[],"e":"str_33"}, {"c":34,"d":[],"e":"str_34"}, {"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}, {"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}, {"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}, {"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}, {"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}], + "json.^`b`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"b":{"d":"15","e":"str_15"}}, {"b":{"d":"16","e":"str_16"}}, {"b":{"d":"17","e":"str_17"}}, {"b":{"d":"18","e":"str_18"}}, {"b":{"d":"19","e":"str_19"}}, {}, {}, {}, {}, {}, {"b":{"_25":"25"}}, {"b":{"_26":"26"}}, {"b":{"_27":"27"}}, {"b":{"_28":"28"}}, {"b":{"_29":"29"}}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}], + "json.^`d`": [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {"a":"30","c":"1970-01-31"}, {"a":"31","c":"1970-02-01"}, {"a":"32","c":"1970-02-02"}, {"a":"33","c":"1970-02-03"}, {"a":"34","c":"1970-02-04"}, 
{"a":["0"],"b":"35"}, {"a":["0","1"],"b":"36"}, {"a":["0","1","2"],"b":"37"}, {"a":["0","1","2","3"],"b":"38"}, {"a":["0","1","2","3","4"],"b":"39"}] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, 
{"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.non.existing.path": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.non.existing.path.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", 
"25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, 
{"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, 
{"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 
00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + 
"json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", 
"str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, 
{"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, 
{"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.b.b.e": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`String`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "str_15", "str_16", "str_17", "str_18", "str_19", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b.e.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + 
"json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, 
{"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 
00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.d.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "35", "36", "37", "38", "39"], + "json.d.b.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], 
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.b.b._26": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] 
+} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.d.a": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "30", "31", "32", "33", "34", ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], ["0"], ["0","1"], ["0","1","2"], ["0","1","2","3"], ["0","1","2","3","4"]], + "json.d.a.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Int64`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, "26", null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null], + "json.b.b._26.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 
00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.c": [0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, 
{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 
null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", 
"23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, {"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + 
"json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} +{ + "json": [{"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":0}}}, {"a":{"b":{"c":5}}}, {"a":{"b":{"c":6}}}, {"a":{"b":{"c":7}}}, {"a":{"b":{"c":8}}}, {"a":{"b":{"c":9}}}, {"a":{"b":{"c":0,"d":"10","e":"str_10"}}}, {"a":{"b":{"c":0,"d":"11","e":"str_11"}}}, {"a":{"b":{"c":0,"d":"12","e":"str_12"}}}, {"a":{"b":{"c":0,"d":"13","e":"str_13"}}}, {"a":{"b":{"c":0,"d":"14","e":"str_14"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"15","e":"str_15"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"16","e":"str_16"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"17","e":"str_17"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"18","e":"str_18"}}}, {"a":{"b":{"c":0}},"b":{"b":{"d":"19","e":"str_19"}}}, {"a":{"b":{"c":20,"d":"20","e":"str_20"}}}, {"a":{"b":{"c":21,"d":"21","e":"str_21"}}}, {"a":{"b":{"c":22,"d":"22","e":"str_22"}}}, {"a":{"b":{"c":23,"d":"23","e":"str_23"}}}, {"a":{"b":{"c":24,"d":"24","e":"str_24"}}}, {"a":{"b":{"c":25,"d":"25","e":"str_25"}},"b":{"b":{"_25":"25"}}}, {"a":{"b":{"c":26,"d":"26","e":"str_26"}},"b":{"b":{"_26":"26"}}}, {"a":{"b":{"c":27,"d":"27","e":"str_27"}},"b":{"b":{"_27":"27"}}}, {"a":{"b":{"c":28,"d":"28","e":"str_28"}},"b":{"b":{"_28":"28"}}}, {"a":{"b":{"c":29,"d":"29","e":"str_29"}},"b":{"b":{"_29":"29"}}}, {"a":{"b":{"c":30,"d":[],"e":"str_30"}},"d":{"a":"30","c":"1970-01-31"}}, {"a":{"b":{"c":31,"d":[],"e":"str_31"}},"d":{"a":"31","c":"1970-02-01"}}, {"a":{"b":{"c":32,"d":[],"e":"str_32"}},"d":{"a":"32","c":"1970-02-02"}}, {"a":{"b":{"c":33,"d":[],"e":"str_33"}},"d":{"a":"33","c":"1970-02-03"}}, {"a":{"b":{"c":34,"d":[],"e":"str_34"}},"d":{"a":"34","c":"1970-02-04"}}, {"a":{"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}},"d":{"a":["0"],"b":"35"}}, {"a":{"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}},"d":{"a":["0","1"],"b":"36"}}, {"a":{"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}},"d":{"a":["0","1","2"],"b":"37"}}, {"a":{"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}},"d":{"a":["0","1","2","3"],"b":"38"}}, {"a":{"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}},"d":{"a":["0","1","2","3","4"],"b":"39"}}], + "json.^`a`": [{"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":5}}, {"b":{"c":6}}, {"b":{"c":7}}, {"b":{"c":8}}, {"b":{"c":9}}, {"b":{"c":0,"d":"10","e":"str_10"}}, {"b":{"c":0,"d":"11","e":"str_11"}}, {"b":{"c":0,"d":"12","e":"str_12"}}, {"b":{"c":0,"d":"13","e":"str_13"}}, {"b":{"c":0,"d":"14","e":"str_14"}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":0}}, {"b":{"c":20,"d":"20","e":"str_20"}}, {"b":{"c":21,"d":"21","e":"str_21"}}, {"b":{"c":22,"d":"22","e":"str_22"}}, {"b":{"c":23,"d":"23","e":"str_23"}}, {"b":{"c":24,"d":"24","e":"str_24"}}, {"b":{"c":25,"d":"25","e":"str_25"}}, {"b":{"c":26,"d":"26","e":"str_26"}}, {"b":{"c":27,"d":"27","e":"str_27"}}, {"b":{"c":28,"d":"28","e":"str_28"}}, {"b":{"c":29,"d":"29","e":"str_29"}}, {"b":{"c":30,"d":[],"e":"str_30"}}, {"b":{"c":31,"d":[],"e":"str_31"}}, 
{"b":{"c":32,"d":[],"e":"str_32"}}, {"b":{"c":33,"d":[],"e":"str_33"}}, {"b":{"c":34,"d":[],"e":"str_34"}}, {"b":{"c":35,"d":"1970-01-01 00:00:35","e":"str_35"}}, {"b":{"c":36,"d":"1970-01-01 00:00:36","e":"str_36"}}, {"b":{"c":37,"d":"1970-01-01 00:00:37","e":"str_37"}}, {"b":{"c":38,"d":"1970-01-01 00:00:38","e":"str_38"}}, {"b":{"c":39,"d":"1970-01-01 00:00:39","e":"str_39"}}], + "json.a.b.d": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", [], [], [], [], [], "1970-01-01 00:00:35", "1970-01-01 00:00:36", "1970-01-01 00:00:37", "1970-01-01 00:00:38", "1970-01-01 00:00:39"], + "json.a.b.d.:`Int64`": [null, null, null, null, null, null, null, null, null, null, "10", "11", "12", "13", "14", null, null, null, null, null, "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", null, null, null, null, null, null, null, null, null, null], + "json.a.b.d.:`Date`": [null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null] +} diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..f571d2417f4 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_1_wide_merge_tree.sql.j2 @@ -0,0 +1,93 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, 
json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, 
json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id 
format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 new file mode 100644 index 00000000000..e1e69879cfb --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.reference.j2 @@ -0,0 +1,66 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..6c33044b5d8 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -0,0 +1,128 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 
100000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select 
json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, 
json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from 
test where empty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference new file mode 100644 index 00000000000..1ef53fb5716 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.reference @@ -0,0 +1,33 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql new file mode 100644 index 00000000000..cc646987c80 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_memory.sql @@ -0,0 +1,123 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', 
number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select 
json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, 
json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, 
json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 new file mode 100644 index 00000000000..e1e69879cfb --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.reference.j2 @@ -0,0 +1,66 @@ +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 +('a.b.c','UInt32') +('a.b.d','Array(Nullable(String))') +('a.b.d','DateTime') +('a.b.d','Int64') +('a.b.e','String') +('b.b._0','Int64') +('b.b._1','Int64') +('b.b._2','Int64') +('b.b._3','Int64') +('b.b._4','Int64') +('b.b.d','Int64') +('b.b.e','String') +('d.a','Array(Nullable(Int64))') +('d.a','Int64') +('d.b','Int64') +('d.c','Date') +800000 +800000 +300000 +700000 +700000 +200000 +400000 +500000 +600000 +500000 +600000 +600000 +700000 +580000 +680000 +0 +0 diff --git a/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..ab4e0437c15 --- /dev/null +++ b/tests/queries/0_stateless/03207_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -0,0 +1,128 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); 
+insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, 
json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select 
count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select 
json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +{% endfor -%} + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 new file mode 100644 index 00000000000..0228ae1e7df --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.reference.j2 @@ -0,0 +1,545 @@ +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, 
{"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], 
[null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, 
{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], 
[{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], 
[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], 
[], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, 
{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], 
[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], 
[{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], 
[{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], 
[{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, 
{"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], 
[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], 
[["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], 
["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + 
"json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], 
[["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], 
[null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, 
{"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, 
{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], 
[{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], 
[null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, 
{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], 
[null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], 
["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], 
[{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], 
[{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, 
{"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], 
[["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], 
["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, 
{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + 
"json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], 
[["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], 
[null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], 
[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +{ + "json": [{"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[]}}, {"a":{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, 
{"a":{"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}, {"a":{"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}}, {"a":{"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}}, {"a":{"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}}, {"a":{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}}, {"a":{"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}}], + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, 
{"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], 
[{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.b": [[], [], [], [], [], [{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.c.d.e": [[], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_0": [[], [], [], [], [], ["5"], ["6",null], ["7",null,null], ["8",null,null,null], ["9",null,null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_1": [[], [], [], [], [], [null], [null,"6"], [null,"7",null], [null,"8",null,null], [null,"9",null,null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_2": [[], [], [], [], [], [null], [null,null], [null,null,"7"], [null,null,"8",null], [null,null,"9",null,null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_3": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"8"], [null,null,null,"9",null], [], [], [], [], [], [], [], [], [], []], + "json.a.b.b.c.d_4": [[], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"9"], [], [], [], [], [], [], [], [], [], []], + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], 
[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`": [[], [], [], [], [], [], [], [], [], [], [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], 
[null,"18",null,null], [null,"19",null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_2": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,"12"], [null,null,"13",null], [null,null,"14",null,null], [null], [null,null], [null,null,"17"], [null,null,"18",null], [null,null,"19",null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_3": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,"13"], [null,null,null,"14",null], [null], [null,null], [null,null,null], [null,null,null,"18"], [null,null,null,"19",null]], + "json.a.r.:`Array(JSON)`.b.c.d_4": [[], [], [], [], [], [], [], [], [], [], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"14"], [null], [null,null], [null,null,null], [null,null,null,null], [null,null,null,null,"19"]], + "json.^`a`": [{"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[]}, {"b":[{"b":{"c":{"d_0":"5"}},"c":{"d":{"e":["0"]}}}]}, {"b":[{"b":{"c":{"d_0":"6"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"6"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[{"b":{"c":{"d_0":"7"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"7"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"7"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[{"b":{"c":{"d_0":"8"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"8"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"8"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"8"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[{"b":{"c":{"d_0":"9"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"9"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"9"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"9"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"9"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}]}, {"b":[],"r":[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}]}, {"a1":"15","a2":"15","a3":"15","a4":"15","a5":"15","a6":"15","a7":"15","a8":"15","b":[],"r":[{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}]}, {"a1":"16","a2":"16","a3":"16","a4":"16","a5":"16","a6":"16","a7":"16","a8":"16","b":[],"r":[{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}]}, {"a1":"17","a2":"17","a3":"17","a4":"17","a5":"17","a6":"17","a7":"17","a8":"17","b":[],"r":[{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}]}, 
{"a1":"18","a2":"18","a3":"18","a4":"18","a5":"18","a6":"18","a7":"18","a8":"18","b":[],"r":[{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}]}, {"a1":"19","a2":"19","a3":"19","a4":"19","a5":"19","a6":"19","a7":"19","a8":"19","b":[],"r":[{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]}], + "json.a.b.^`b`.c": [[], [], [], [], [], [{"d_0":"5"}], [{"d_0":"6"},{"d_1":"6"}], [{"d_0":"7"},{"d_1":"7"},{"d_2":"7"}], [{"d_0":"8"},{"d_1":"8"},{"d_2":"8"},{"d_3":"8"}], [{"d_0":"9"},{"d_1":"9"},{"d_2":"9"},{"d_3":"9"},{"d_4":"9"}], [], [], [], [], [], [], [], [], [], []], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], 
[null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], 
[{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.c.d.e.:`Array(Nullable(Int64))`": [[], [], [], [], [], [], [], [], [], [], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]], [["0"]], [["0","1"],["0","1","2"]], [["0","1","2"],["0","1","2","3"],["0","1","2","3","4"]], [["0","1","2","3"],["0","1","2","3","4"],["0"],["0","1"]], [["0","1","2","3","4"],["0"],["0","1"],["0","1","2"],["0","1","2","3"]]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]], + "json.a.r.:`Array(JSON)`.b.c.d_1.:`Int64`": [[], [], [], [], [], [], [], [], [], [], [null], [null,"11"], [null,"12",null], [null,"13",null,null], [null,"14",null,null,null], [null], [null,"16"], [null,"17",null], [null,"18",null,null], [null,"19",null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], 
["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], 
[{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], [{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} +{ + "json.a.r": [null, null, null, null, null, null, null, null, null, null, [{"b":{"c":{"d_0":"10"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"11"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"11"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"12"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"12"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"12"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"13"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"13"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"13"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"13"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"14"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"14"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"14"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"14"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"14"}},"c":{"d":{"e":["0","1","2","3"]}}}], [{"b":{"c":{"d_0":"15"}},"c":{"d":{"e":["0"]}}}], [{"b":{"c":{"d_0":"16"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_1":"16"}},"c":{"d":{"e":["0","1","2"]}}}], [{"b":{"c":{"d_0":"17"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_1":"17"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_2":"17"}},"c":{"d":{"e":["0","1","2","3","4"]}}}], [{"b":{"c":{"d_0":"18"}},"c":{"d":{"e":["0","1","2","3"]}}},{"b":{"c":{"d_1":"18"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_2":"18"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_3":"18"}},"c":{"d":{"e":["0","1"]}}}], [{"b":{"c":{"d_0":"19"}},"c":{"d":{"e":["0","1","2","3","4"]}}},{"b":{"c":{"d_1":"19"}},"c":{"d":{"e":["0"]}}},{"b":{"c":{"d_2":"19"}},"c":{"d":{"e":["0","1"]}}},{"b":{"c":{"d_3":"19"}},"c":{"d":{"e":["0","1","2"]}}},{"b":{"c":{"d_4":"19"}},"c":{"d":{"e":["0","1","2","3"]}}}]], + "json.a.r.:`Array(JSON)`.^`b`": [[], [], [], [], [], [], [], [], [], [], [{"c":{"d_0":"10"}}], [{"c":{"d_0":"11"}},{"c":{"d_1":"11"}}], [{"c":{"d_0":"12"}},{"c":{"d_1":"12"}},{"c":{"d_2":"12"}}], [{"c":{"d_0":"13"}},{"c":{"d_1":"13"}},{"c":{"d_2":"13"}},{"c":{"d_3":"13"}}], [{"c":{"d_0":"14"}},{"c":{"d_1":"14"}},{"c":{"d_2":"14"}},{"c":{"d_3":"14"}},{"c":{"d_4":"14"}}], [{"c":{"d_0":"15"}}], [{"c":{"d_0":"16"}},{"c":{"d_1":"16"}}], [{"c":{"d_0":"17"}},{"c":{"d_1":"17"}},{"c":{"d_2":"17"}}], 
[{"c":{"d_0":"18"}},{"c":{"d_1":"18"}},{"c":{"d_2":"18"}},{"c":{"d_3":"18"}}], [{"c":{"d_0":"19"}},{"c":{"d_1":"19"}},{"c":{"d_2":"19"}},{"c":{"d_3":"19"}},{"c":{"d_4":"19"}}]], + "json.a.r.:`Array(JSON)`.^`b`.c": [[], [], [], [], [], [], [], [], [], [], [{"d_0":"10"}], [{"d_0":"11"},{"d_1":"11"}], [{"d_0":"12"},{"d_1":"12"},{"d_2":"12"}], [{"d_0":"13"},{"d_1":"13"},{"d_2":"13"},{"d_3":"13"}], [{"d_0":"14"},{"d_1":"14"},{"d_2":"14"},{"d_3":"14"},{"d_4":"14"}], [{"d_0":"15"}], [{"d_0":"16"},{"d_1":"16"}], [{"d_0":"17"},{"d_1":"17"},{"d_2":"17"}], [{"d_0":"18"},{"d_1":"18"},{"d_2":"18"},{"d_3":"18"}], [{"d_0":"19"},{"d_1":"19"},{"d_2":"19"},{"d_3":"19"},{"d_4":"19"}]], + "json.a.r.:`Array(JSON)`.b.c.d_0.:`Int64`": [[], [], [], [], [], [], [], [], [], [], ["10"], ["11",null], ["12",null,null], ["13",null,null,null], ["14",null,null,null,null], ["15"], ["16",null], ["17",null,null], ["18",null,null,null], ["19",null,null,null,null]] +} diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 new file mode 100644 index 00000000000..1353980cd35 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_1.sql.j2 @@ -0,0 +1,41 @@ +-- Tags: no-fasttest, long + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; system stop merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; system start merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; system stop merges test;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; system start merges test;'] -%} + +{{ create_command }} + +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(5, 5); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10, 5); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(15, 5); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as 
paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format JSONColumns; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format JSONColumns; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format JSONColumns; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format JSONColumns; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format JSONColumns; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 new file mode 100644 index 00000000000..2fd3437e3d2 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.reference.j2 @@ -0,0 +1,60 @@ +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 new file mode 100644 index 00000000000..e3930165602 --- 
/dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_compact_merge_tree.sql.j2 @@ -0,0 +1,57 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and 
arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +{% endfor -%} + +drop table test; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference new file mode 100644 index 00000000000..34557cf60bb --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.reference @@ -0,0 +1,30 @@ +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git 
a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql new file mode 100644 index 00000000000..9274b9b9cf7 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_memory.sql @@ -0,0 +1,52 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=Memory; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and 
empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 new file mode 100644 index 00000000000..2fd3437e3d2 --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.reference.j2 @@ -0,0 +1,60 @@ +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') 
+('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 +('a.a1','String') +('a.a2','String') +('a.a3','String') +('a.a4','String') +('a.a5','String') +('a.a6','String') +('a.a7','String') +('a.a8','String') +('a.b','Array(JSON)') +('a.r','Array(JSON(max_dynamic_types=16, max_dynamic_paths=2))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +('b.c.d_0','Int64') +('b.c.d_1','Int64') +('b.c.d_2','Int64') +('b.c.d_3','Int64') +('b.c.d_4','Int64') +('c.d.e','Array(Nullable(Int64))') +20000 +20000 +0 +0 +20000 +20000 +0 +0 diff --git a/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 new file mode 100644 index 00000000000..3010fa0e2de --- /dev/null +++ b/tests/queries/0_stateless/03208_array_of_json_read_subcolumns_2_wide_merge_tree.sql.j2 @@ -0,0 +1,57 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +{% for merge_command in ['system stop merges test', 'system start merges test'] -%} + +{{ merge_command }}; + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, 
json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + +select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +{% endfor -%} + +drop table test; diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 
b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 new file mode 100644 index 00000000000..ea4e1da7181 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.reference.j2 @@ -0,0 +1,136 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 new file mode 100644 index 00000000000..cc143e4ceef --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_horizontal_merges.sql.j2 @@ -0,0 +1,74 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', number)) from numbers(90000); +insert into test select number, toJSONString(map('c', number)) from numbers(80000); +insert into test select number, toJSONString(map('d', number)) from numbers(70000); +insert into test select number, 
toJSONString(map('e', number)) from numbers(60000); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(200000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(10000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(40000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 b/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 new file mode 100644 index 00000000000..f953dee10fe --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_merges_small.reference.j2 @@ -0,0 +1,272 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths 
+Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +10 a +9 b +8 c +7 d +6 e +Shared data paths +Dynamic paths +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +8 c +Shared data paths +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +1 g +Shared data paths +8 c +7 d +6 e +Dynamic paths +20 f +10 a +9 b +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +10 a +9 b +4 c +Shared data paths +8 c +7 d +6 e +1 g +Dynamic paths +20 f +12 c +10 a +Shared data paths +9 b +7 d +6 e +1 g diff --git a/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 b/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 new file mode 100644 index 00000000000..e4b64ac7561 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_merges_small.sql.j2 @@ -0,0 +1,76 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings 
min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(10); +insert into test select number, toJSONString(map('b', number)) from numbers(9); +insert into test select number, toJSONString(map('c', number)) from numbers(8); +insert into test select number, toJSONString(map('d', number)) from numbers(7); +insert into test select number, toJSONString(map('e', number)) from numbers(6); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(20); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(1); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(4); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() 
desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 new file mode 100644 index 00000000000..d292b1454c6 --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.reference.j2 @@ -0,0 +1,136 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g +create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +100000 a +90000 b +80000 c +70000 d +60000 e +Shared data paths +Dynamic paths +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +80000 c +Shared data paths +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +10000 g +Shared data paths +80000 c +70000 d +60000 e +Dynamic paths +200000 f +100000 a +90000 b +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +100000 a +90000 b +40000 c +Shared data paths +80000 c +70000 d +60000 e +10000 g +Dynamic paths +200000 f +120000 c +100000 a +Shared data paths +90000 b +70000 d +60000 e +10000 g diff --git a/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 new file mode 100644 index 00000000000..e427db7677f --- /dev/null +++ b/tests/queries/0_stateless/03209_json_type_vertical_merges.sql.j2 @@ -0,0 +1,74 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ 
create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', number)) from numbers(90000); +insert into test select number, toJSONString(map('c', number)) from numbers(80000); +insert into test select number, toJSONString(map('d', number)) from numbers(70000); +insert into test select number, toJSONString(map('e', number)) from numbers(60000); +insert into test select number, '{}' from numbers(100000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('f', number)) from numbers(200000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('g', number)) from numbers(10000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('c', number)) from numbers(40000); +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(json)) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(json)) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03209_parallel_replicas_lost_decimal_conversion.reference 
b/tests/queries/0_stateless/03209_parallel_replicas_lost_decimal_conversion.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03209_parallel_replicas_lost_decimal_conversion.sql b/tests/queries/0_stateless/03209_parallel_replicas_lost_decimal_conversion.sql new file mode 100644 index 00000000000..bcc9dec306b --- /dev/null +++ b/tests/queries/0_stateless/03209_parallel_replicas_lost_decimal_conversion.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t_03209 SYNC; + +CREATE TABLE t_03209 ( `a` Decimal(18, 0), `b` Decimal(18, 1), `c` Decimal(36, 0) ) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_03209', 'r1') ORDER BY tuple(); +INSERT INTO t_03209 VALUES ('33', '44.4', '35'); + +SET max_parallel_replicas = 2, cluster_for_parallel_replicas='parallel_replicas'; + +SELECT * FROM t_03209 WHERE a IN toDecimal32('33.3000', 4) SETTINGS allow_experimental_parallel_reading_from_replicas=0; +SELECT * FROM t_03209 WHERE a IN toDecimal32('33.3000', 4) SETTINGS allow_experimental_parallel_reading_from_replicas=1; + +DROP TABLE t_03209 SYNC; diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 new file mode 100644 index 00000000000..37b6854938a --- /dev/null +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.reference.j2 @@ -0,0 +1,72 @@ +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N +initial insert +alter add column 1 +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +insert after alter add column +3 a.b +3 b.c +3 c.d +0 {} \N {} \N \N +1 {} \N {} \N \N +2 {} \N {} \N \N +3 {"a":{"b":"3"}} 3 {"b":"3"} \N \N +4 {"a":{"b":"4"}} 4 {"b":"4"} \N \N +5 {"a":{"b":"5"}} 5 {"b":"5"} \N \N +6 {"b":{"c":"6"}} \N {} 6 \N +7 {"b":{"c":"7"}} \N {} 7 \N +8 {"b":{"c":"8"}} \N {} 8 \N +9 {"c":{"d":"9"}} \N {} \N 9 +10 {"c":{"d":"10"}} \N {} \N 10 +11 {"c":{"d":"11"}} \N {} \N 11 +12 {} \N {} \N \N +13 {} \N {} \N \N +14 {} \N {} \N \N diff --git a/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 new file mode 100644 index 00000000000..add57928804 --- /dev/null +++ b/tests/queries/0_stateless/03210_json_type_alter_add_column.sql.j2 @@ -0,0 +1,34 @@ +-- Tags: no-fasttest, long + +set allow_experimental_dynamic_type = 1; +set allow_experimental_variant_type = 1; +set allow_experimental_json_type = 1; +set use_variant_as_common_type 
= 1; + +drop table if exists test; + +{% for create_command in ['create table test (x UInt64) engine=Memory;', + 'create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;', + 'create table test (x UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;'] -%} + +{{ create_command }} + +select 'initial insert'; +insert into test select number from numbers(3); + +select 'alter add column 1'; +alter table test add column json JSON settings mutations_sync=1; +select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path; +select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x; + +select 'insert after alter add column'; +insert into test select number, toJSONString(map('a.b', number::UInt32)) from numbers(3, 3); +insert into test select number, toJSONString(map('b.c', number::UInt32)) from numbers(6, 3); +insert into test select number, toJSONString(map('c.d', number::UInt32)) from numbers(9, 3); +insert into test select number, '{}' from numbers(12, 3); +select count(), arrayJoin(JSONAllPaths(json)) as path from test group by path order by count() desc, path; +select x, json, json.a.b, json.^a, json.b.c.:Int64, json.c.d from test order by x; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference index d4734a85e72..4ecf7f56b07 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference @@ -38,3 +38,19 @@ 7 8 9 +15 \N 3 15 15 15 15 +14 \N 2 10 10 10 154 +13 \N 2 10 10 10 143 +12 \N 2 10 10 10 14 +11 \N 2 10 10 10 12 +10 \N 2 10 10 10 10 +9 \N 1 5 5 5 99 +8 \N 1 5 5 5 88 +7 \N 1 5 5 5 9 +6 \N 1 5 5 5 7 +5 \N 1 5 5 5 5 +4 \N 0 0 0 0 44 +3 \N 0 0 0 0 33 +2 \N 0 0 0 0 4 +1 \N 0 0 0 0 2 +0 \N 0 0 0 0 0 diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql index f6017ee6690..cc6746e428f 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql @@ -2,3 +2,23 @@ SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (OR SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); + +SELECT + number, + YYYYMMDDToDate(1, toLowCardinality(11), max(YYYYMMDDToDate(YYYYMMDDToDate(toLowCardinality(1), 11, materialize(NULL), 19700101.1, 1, 27, 7, materialize(toUInt256(37)), 9, 19, 9), 1, toUInt128(11), NULL, 19700101.1, 1, 27, 7, 37, 9, 19, 9), toUInt256(30)) IGNORE NULLS OVER w, NULL, 19700101.1, toNullable(1), 27, materialize(7), 37, 9, 19, 9), + p, + pp, + lagInFrame(number, number - pp) OVER w AS lag2, + lagInFrame(number, number - pp, number * 11) OVER w AS lag, + leadInFrame(number, number - pp, number * 11) OVER w AS lead +FROM +( + SELECT + number, + intDiv(number, 5) AS p, + p * 5 AS pp + FROM numbers(16) +) +WHERE toLowCardinality(1) +WINDOW w AS (PARTITION BY p ORDER BY number ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY number DESC NULLS LAST; diff --git 
a/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 new file mode 100644 index 00000000000..9b6ed82abed --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges.reference.j2 @@ -0,0 +1,136 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +300000 c +150000 d +Shared data paths +Dynamic paths +300000 c +Shared data paths +150000 d +Dynamic paths +600000 f +300000 c +150000 e +Shared data paths +150000 d +Dynamic paths +600000 f +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +600000 f +450000 c +Shared data paths +300000 c +150000 d +150000 e +Dynamic paths +750000 c +Shared data paths +600000 f +150000 d +150000 e diff --git a/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 new file mode 100644 index 00000000000..0af998e22bb --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges.sql.j2 @@ -0,0 +1,63 @@ +-- Tags: no-fasttest, long, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, 
min_bytes_for_wide_part=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(100000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(100000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(50000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(50000); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(200000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(150000); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git 
a/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 b/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 new file mode 100644 index 00000000000..76339dba3e3 --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges_small.reference.j2 @@ -0,0 +1,136 @@ +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +Dynamic paths +30 c +15 d +Shared data paths +Dynamic paths +30 c +Shared data paths +15 d +Dynamic paths +60 f +30 c +15 e +Shared data paths +15 d +Dynamic paths +60 f +Shared data paths +30 c +15 d +15 e +Dynamic paths +60 f +45 c +Shared data paths +30 c +15 d +15 e +Dynamic paths +75 c +Shared data paths +60 f +15 d +15 e diff --git a/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 b/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 new file mode 100644 index 00000000000..86e5a6c71c9 --- /dev/null +++ b/tests/queries/0_stateless/03211_nested_json_merges_small.sql.j2 @@ -0,0 +1,63 @@ +-- Tags: no-fasttest, long, no-tsan, no-asan, no-msan, no-ubsan + +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;', + 'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;', + 
'create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1;'] -%} + +select '{{ create_command }}'; + +{{ create_command }} + +system stop merges test; +insert into test select number, toJSONString(map('a', number)) from numbers(10); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(10); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(5); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('e', x), range(number % 5 + 1)))) from numbers(5); +insert into test select number, toJSONString(map('b', arrayMap(x -> map('f', x), range(number % 5 + 1)))) from numbers(20); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +system stop merges test; +insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(15); + +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +system start merges test; +optimize table test final; +select 'Dynamic paths'; +select count(), arrayJoin(JSONDynamicPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; +select 'Shared data paths'; +select count(), arrayJoin(JSONSharedDataPaths(arrayJoin(json.b[]))) as path from test group by path order by count() desc, path; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference index cd109daac52..13b1138d1c4 100644 --- a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference +++ 
b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.reference @@ -1,2 +1,2 @@ x Nullable(Int64) -schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, max_bytes_to_read_for_schema_inference=1000, schema_inference_make_columns_nullable=true, try_infer_integers=true, try_infer_dates=true, try_infer_datetimes=true, try_infer_numbers_from_strings=false, read_bools_as_numbers=true, read_bools_as_strings=true, read_objects_as_strings=true, read_numbers_as_strings=true, read_arrays_as_strings=true, try_infer_objects_as_tuples=true, infer_incomplete_types_as_strings=true, try_infer_objects=false, use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=false +schema_inference_hints=, max_rows_to_read_for_schema_inference=25000, max_bytes_to_read_for_schema_inference=1000, schema_inference_make_columns_nullable=true, try_infer_integers=true, try_infer_dates=true, try_infer_datetimes=true, try_infer_datetimes_only_datetime64=false, try_infer_numbers_from_strings=false, read_bools_as_numbers=true, read_bools_as_strings=true, read_objects_as_strings=true, read_numbers_as_strings=true, read_arrays_as_strings=true, try_infer_objects_as_tuples=true, infer_incomplete_types_as_strings=true, try_infer_objects=false, use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=false diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference b/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference new file mode 100644 index 00000000000..1b3e6b7a8db --- /dev/null +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.reference @@ -0,0 +1,4 @@ +{"a":"42"} +{"a":["1","2","3"]} +{"a":"42"} +{"a":["1","2","3"]} diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql new file mode 100644 index 00000000000..1f6a025825a --- /dev/null +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest + +set allow_experimental_json_type = 1; +drop table if exists test; +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; + +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=10000000, min_bytes_for_wide_part=10000000; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference b/tests/queries/0_stateless/03221_create_if_not_exists_setting.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql new file mode 100644 index 00000000000..18b3ed7bcec --- /dev/null +++ b/tests/queries/0_stateless/03221_create_if_not_exists_setting.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel + +SET create_if_not_exists=0; -- Default + +DROP TABLE IF EXISTS example_table; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -- { serverError TABLE_ALREADY_EXISTS } + +DROP DATABASE IF 
EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; -- { serverError DATABASE_ALREADY_EXISTS } + +SET create_if_not_exists=1; + +DROP TABLE IF EXISTS example_table; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; + +DROP DATABASE IF EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; + +DROP DATABASE IF EXISTS example_database; +DROP TABLE IF EXISTS example_table; \ No newline at end of file diff --git a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference new file mode 100644 index 00000000000..68538c3f75b --- /dev/null +++ b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.reference @@ -0,0 +1,2 @@ +QUERY_WAS_CANCELLED +QUERY_WAS_CANCELLED diff --git a/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh new file mode 100755 index 00000000000..db943a665cb --- /dev/null +++ b/tests/queries/0_stateless/03221_insert_timeout_overflow_mode.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query "create table null_t (number UInt64) engine = Null;" +${CLICKHOUSE_CLIENT} --query "select sleep(0.1) from system.numbers settings max_block_size = 1 format Native" 2>/dev/null | ${CLICKHOUSE_CLIENT} --max_execution_time 0.3 --timeout_overflow_mode break --query "insert into null_t format Native" 2>&1 | grep -o "QUERY_WAS_CANCELLED" diff --git a/tests/queries/0_stateless/03221_key_condition_bug.reference b/tests/queries/0_stateless/03221_key_condition_bug.reference new file mode 100644 index 00000000000..e373ee695f6 --- /dev/null +++ b/tests/queries/0_stateless/03221_key_condition_bug.reference @@ -0,0 +1 @@ +50 diff --git a/tests/queries/0_stateless/03221_key_condition_bug.sql b/tests/queries/0_stateless/03221_key_condition_bug.sql new file mode 100644 index 00000000000..bac3e631a81 --- /dev/null +++ b/tests/queries/0_stateless/03221_key_condition_bug.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS report_metrics_v2 +( + `a` UInt64 +) Engine = MergeTree() +ORDER BY a; + +insert into report_metrics_v2 SELECT * FROM system.numbers LIMIT 50000; + +SELECT count(*) from report_metrics_v2 WHERE (intDiv(a, 50) = 200) AND (intDiv(a, 50000) = 0); + +DROP TABLE report_metrics_v2; diff --git a/tests/queries/0_stateless/03221_merge_profile_events.reference b/tests/queries/0_stateless/03221_merge_profile_events.reference new file mode 100644 index 00000000000..d969717336b --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.reference @@ -0,0 +1,3 @@ +Horizontal 1 20000 3 0 480000 1 1 1 1 +Vertical 1 20000 1 2 480000 1 1 1 1 1 1 +Vertical 2 400000 2 6 12800000 1 1 1 1 1 1 1 1 1 1 diff --git a/tests/queries/0_stateless/03221_merge_profile_events.sql b/tests/queries/0_stateless/03221_merge_profile_events.sql new file mode 100644 index 00000000000..1aa3dd266f8 --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.sql @@ -0,0 +1,90 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +CREATE TABLE t_merge_profile_events_1 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part 
= 0; + +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_1 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0 +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_1' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +CREATE TABLE t_merge_profile_events_2 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_2 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_2' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +DROP TABLE IF EXISTS t_merge_profile_events_3; + +CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT v2, v2 * v2, v2 * 2, v2 * 10, v1 ORDER BY v1)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); + +OPTIMIZE TABLE t_merge_profile_events_3 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageTotalMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] <= duration_ms, + 
ProfileEvents['MergeTotalMilliseconds'] <= duration_ms +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_3' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_3; diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.reference b/tests/queries/0_stateless/03221_mutate_profile_events.reference new file mode 100644 index 00000000000..d094c37ff88 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.reference @@ -0,0 +1,2 @@ +3 2 1 10000 160000 0 1 1 1 +4 2 1 10000 320000 1 0 1 1 diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.sh b/tests/queries/0_stateless/03221_mutate_profile_events.sh new file mode 100755 index 00000000000..3758db905e0 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Tags: no-random-settings, no-random-merge-tree-settings + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query " + DROP TABLE IF EXISTS t_mutate_profile_events; + + CREATE TABLE t_mutate_profile_events (key UInt64, id UInt64, v1 UInt64, v2 UInt64) + ENGINE = MergeTree ORDER BY id PARTITION BY key + SETTINGS min_bytes_for_wide_part = 0; + + INSERT INTO t_mutate_profile_events SELECT 1, number, number, number FROM numbers(10000); + INSERT INTO t_mutate_profile_events SELECT 2, number, number, number FROM numbers(10000); + + SET mutations_sync = 2; + + ALTER TABLE t_mutate_profile_events UPDATE v1 = 1000 WHERE key = 1; + ALTER TABLE t_mutate_profile_events DELETE WHERE key = 2 AND v2 % 10 = 0; +" + +# Mutation query may return before the entry is added to part log. +# So, we may have to retry the flush of logs until all entries are actually flushed. 
+for _ in {1..10}; do + ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" + res=$(${CLICKHOUSE_CLIENT} --query "SELECT count() FROM system.part_log WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart'") + + if [[ $res -eq 4 ]]; then + break + fi + + sleep 2.0 +done + +${CLICKHOUSE_CLIENT} --query " + SELECT + splitByChar('_', part_name)[-1] AS version, + sum(ProfileEvents['MutationTotalParts']), + sum(ProfileEvents['MutationUntouchedParts']), + sum(ProfileEvents['MutatedRows']), + sum(ProfileEvents['MutatedUncompressedBytes']), + sum(ProfileEvents['MutationAllPartColumns']), + sum(ProfileEvents['MutationSomePartColumns']), + sum(ProfileEvents['MutationTotalMilliseconds']) > 0, + sum(ProfileEvents['MutationExecuteMilliseconds']) > 0, + FROM system.part_log + WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart' + GROUP BY version ORDER BY version; + + DROP TABLE IF EXISTS t_mutate_profile_events; +" diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.reference b/tests/queries/0_stateless/03221_refreshable_matview_progress.reference new file mode 100644 index 00000000000..5ed392e61c7 --- /dev/null +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.reference @@ -0,0 +1,2 @@ +0 +4 4 1 diff --git a/tests/queries/0_stateless/03221_refreshable_matview_progress.sql b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql new file mode 100644 index 00000000000..98e1c48478d --- /dev/null +++ b/tests/queries/0_stateless/03221_refreshable_matview_progress.sql @@ -0,0 +1,20 @@ +-- Tags: no-replicated-database, no-ordinary-database + +set allow_experimental_refreshable_materialized_view=1; + +CREATE MATERIALIZED VIEW 03221_rmv +REFRESH AFTER 10 SECOND +( +x UInt64 +) +ENGINE = Memory +AS SELECT number AS x +FROM numbers(3) +UNION ALL +SELECT rand64() AS x; + +SELECT sleep(2); + +SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE database = currentDatabase() and view = '03221_rmv'; + +DROP TABLE 03221_rmv; diff --git a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh new file mode 100755 index 00000000000..021278955cd --- /dev/null +++ b/tests/queries/0_stateless/03221_s3_imds_decent_timeout.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-asan, no-msan, no-tsan +# ^ requires S3 + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Inaccessible IMDS should not introduce large delays, so this query should reply quickly at least sometimes: +while true +do + # This host (likely) drops packets sent to it (does not reply), so it is good for testing timeouts. + # At the same time, we expect that the clickhouse host does not drop packets and quickly replies with 4xx, which is a non-retriable error for S3. 
+ AWS_EC2_METADATA_SERVICE_ENDPOINT='https://10.255.255.255/' ${CLICKHOUSE_LOCAL} --time --query "SELECT * FROM s3('${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/nonexistent')" |& grep -v -F 404 | + ${CLICKHOUSE_LOCAL} --input-format TSV "SELECT c1::Float64 < 1 FROM table" | grep 1 && break +done diff --git a/tests/queries/0_stateless/03222_date_time_inference.reference b/tests/queries/0_stateless/03222_date_time_inference.reference new file mode 100644 index 00000000000..221ab1fe5f5 --- /dev/null +++ b/tests/queries/0_stateless/03222_date_time_inference.reference @@ -0,0 +1,273 @@ +Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +2020-01-01 Date +String +2020_01_01 String +2020_1_01 String +2020_01_1 String +2020_1_1 String +2020a01a01 String +2020a1a01 String +2020a01a1 String +2020a1a1 String +20200101 String +DateTime +2020-01-02 18:42:42 DateTime +2020-01-02 18:42:42 DateTime +2020-01-02 18:42:42 DateTime +String +2020_01_01 42:42:42 String +2020a01a01 42:42:42 String +2020-01-01 42.42.42 String +2020-01-01 42 42 42 String +2020-01-01 42a42a42 String +DateTime64 +2020-01-02 18:42:42.424200000 DateTime64(9) +2020-01-02 18:42:42.424200000 DateTime64(9) +2020-01-02 18:42:42.424200000 DateTime64(9) +String +2020_01_01 42:42:42.4242 String +2020a01a01 42:42:42.4242 String +2020-01-01 42.42.42.4242 String +2020-01-01 42 42 42.4242 String +2020-01-01 42a42a42.4242 String +DateTime/DateTime64 best effort +2000-01-01 00:00:00 DateTime +2000-01-01 01:00:00 DateTime +2000-01-01 01:00:00.000000000 DateTime64(9) +02/01/17 010203 MSK String +02/01/17 010203.000 MSK String +02/01/17 010203 MSK+0100 String +02/01/17 010203.000 MSK+0100 String +02/01/17 010203 UTC+0300 String +02/01/17 010203.000 UTC+0300 String +02/01/17 010203Z String +02/01/17 010203.000Z String +02/01/1970 010203Z String +02/01/1970 010203.000Z String +02/01/70 010203Z String +02/01/70 010203.000Z String +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +2000-04-17 01:02:03 DateTime +2000-04-17 01:02:03.000000000 DateTime64(9) +19700102 01:00:00 String +19700102 01:00:00.000 String +19700102010203Z String +19700102010203Z.000 String +1970/01/02 010203Z String +1970/01/02 010203.000Z String +2015-12-31 20:00:00 DateTime +2015-12-31 20:00:00 DateTime +2016-01-01 00:00:00 DateTime +2016-01-01 00:00:00 DateTime +201701 02 010203 UTC+0300 String +201701 02 010203.000 UTC+0300 String +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 02:34:05 DateTime +2017-01-02 02:34:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-02 02:04:05 DateTime +2017-01-02 02:04:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-01 18:04:05 DateTime +2017-01-01 18:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-01 23:04:05 DateTime +2017-01-01 23:04:05.000000000 DateTime64(9) +2017-02-01 23:04:05 DateTime +2017-02-01 23:04:05.000000000 DateTime64(9) 
+2017-06-01 23:04:05 DateTime +2017-06-01 23:04:05.000000000 DateTime64(9) +2017-01-02 00:04:05 DateTime +2017-01-02 00:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 04:04:05 DateTime +2017-01-02 04:04:05.000000000 DateTime64(9) +2017-01-02 02:04:05 DateTime +2017-01-02 02:04:05.000000000 DateTime64(9) +2017-01-02 03:04:05 DateTime +2017-01-02 03:04:05.000000000 DateTime64(9) +2017-04-01 11:22:33 DateTime +2017-04-01 11:22:33.000000000 DateTime64(9) +2017 Apr 02 010203 UTC+0300 String +2017 Apr 02 010203.000 UTC+0300 String +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 11:22:33 DateTime +2017-04-02 11:22:33.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 01:22:33 DateTime +2017-04-02 01:22:33.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-02 01:02:33 DateTime +2017-04-02 01:02:33.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017-04-01 22:02:03 DateTime +2017-04-01 22:02:03.000000000 DateTime64(9) +2017-04-01 21:02:03 DateTime +2017-04-01 21:02:03.000000000 DateTime64(9) +2017-04-02 01:02:03 DateTime +2017-04-02 01:02:03.000000000 DateTime64(9) +2017 Jan 02 010203 UTC+0300 String +2017 Jan 02 010203.000 UTC+0300 String +2017-04-25 01:02:03 DateTime +2017-04-25 01:02:03.000000000 DateTime64(9) +2017-04-25 01:02:03 DateTime +2017-04-25 01:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-24 22:02:03 DateTime +2017-01-24 22:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2017-01-25 09:32:03 DateTime +2017-01-25 09:32:03.000000000 DateTime64(9) +2017-01-25 01:02:03 DateTime +2017-01-25 01:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 13:02:03 DateTime +2017-01-25 13:02:03.000000000 DateTime64(9) +2017-01-25 10:02:03 DateTime +2017-01-25 10:02:03.000000000 DateTime64(9) +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +2018-02-11 03:40:50 DateTime +2018-02-11 03:40:50.000000000 DateTime64(9) +String +2 String +20 String +200 String +2000 String +20000 String +200001 String +2000010 String +20000101 String +200001010 String +2000010101 String +20000101010 String +200001010101 String +2000010101010 String +20000101010101 String +2.1 String +20.1 String +200.1 String +2000.1 String +20000.1 String +200001.1 String +2000010.1 String +20000101.1 String +200001010.1 String +2000010101.1 
String +20000101010.1 String +200001010101.1 String +2000010101010.1 String +20000101010101.1 String +Mar String +Mar1 String +Mar 1 String +Mar01 String +Mar 01 String +Mar2020 String +Mar 2020 String +Mar012020 String +Mar 012020 String +Mar01012020 String +Mar 01012020 String +Mar0101202001 String +Mar 0101202001 String +Mar010120200101 String +Mar 010120200101 String +Mar01012020010101 String +Mar 01012020010101 String +Mar01012020010101.000 String +Mar 0101202001010101.000 String +Sun String +Sun1 String +Sun 1 String +Sun01 String +Sun 01 String +Sun2020 String +Sun 2020 String +Sun012020 String +Sun 012020 String +Sun01012020 String +Sun 01012020 String +Sun0101202001 String +Sun 0101202001 String +Sun010120200101 String +Sun 010120200101 String +Sun01012020010101 String +Sun 01012020010101 String +Sun01012020010101.000 String +Sun 0101202001010101.000 String +2000 01 01 01:00:00 String +2000 01 01 01:00:00.000 String +2000a01a01 01:00:00 String +2000a01a01 01:00:00.000 String +2000-01-01 01 00 00 String +2000-01-01 01 00 00.000 String +2000-01-01 01-00-00 String +2000-01-01 01-00-00.000 String +2000-01-01 01a00a00 String +2000-01-01 01a00a00.000 String +2000-01 01:00:00 String +2000-01 01:00:00.000 String +2000 01 String +2000-01 String +Mar 2000 00:00:00 String +Mar 2000 00:00:00.000 String +2000 00:00:00 String +2000 00:00:00.000 String +Mar 2000-01-01 00:00:00 String +Mar 2000-01-01 00:00:00.000 String +1.7.10 String diff --git a/tests/queries/0_stateless/03222_date_time_inference.sql b/tests/queries/0_stateless/03222_date_time_inference.sql new file mode 100644 index 00000000000..b16f72c72f4 --- /dev/null +++ b/tests/queries/0_stateless/03222_date_time_inference.sql @@ -0,0 +1,288 @@ +set input_format_try_infer_datetimes = 1; +set input_format_try_infer_dates = 1; +set schema_inference_make_columns_nullable = 0; +set input_format_json_try_infer_numbers_from_strings = 0; +set session_timezone = 'UTC'; + +select 'Date'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/1"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20200101"}'); + +select 
'DateTime'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42"}'); + +select 'DateTime64'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42.4242"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42.4242"}'); + +set date_time_input_format='best_effort'; +select 'DateTime/DateTime64 best effort'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102010203Z.000"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01UTC"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+030"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+030"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000"}'); +select x, toTypeName(x) 
from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Jan 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Jan 02 010203.000 UTC+0300"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 06:40:50.000 +0300"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2"}'); +select x, toTypeName(x) from 
format(JSONEachRow, '{"x" : "20"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun1"}'); 
+select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1.7.10"}'); + diff --git a/tests/queries/0_stateless/03222_datetime64_small_value_const.reference b/tests/queries/0_stateless/03222_datetime64_small_value_const.reference new file mode 100644 index 00000000000..ae36c08acc5 --- /dev/null +++ b/tests/queries/0_stateless/03222_datetime64_small_value_const.reference @@ -0,0 +1,18 @@ +0 1970-01-01 00:00:00.000 +0 1970-01-01 00:00:05.000 +0 1970-01-01 00:45:25.456789 +0 1970-01-01 00:53:25.456789123 +0 \N +1 1970-01-01 00:00:00.000 +5 1970-01-01 
00:00:00.000 +2 1970-01-01 00:00:02.456 +3 1970-01-01 00:00:04.811 +4 1970-01-01 00:10:05.000 +4 1970-01-01 00:10:05.000 +1 1970-01-01 00:00:00.000 +2 1970-01-01 00:00:02.456 +3 1970-01-01 00:00:04.811 +5 1970-01-01 00:00:00.000 +0 +0 +5 diff --git a/tests/queries/0_stateless/03222_datetime64_small_value_const.sql b/tests/queries/0_stateless/03222_datetime64_small_value_const.sql new file mode 100644 index 00000000000..a64ebd38ccf --- /dev/null +++ b/tests/queries/0_stateless/03222_datetime64_small_value_const.sql @@ -0,0 +1,44 @@ +-- Tags: shard +set session_timezone = 'UTC'; -- don't randomize the session timezone +SET allow_experimental_analyzer = 1; + +select *, (select toDateTime64(0, 3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64(5, 3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64('1970-01-01 00:45:25.456789', 6)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64('1970-01-01 00:53:25.456789123', 9)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64(null,3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.dt64_03222; +drop table if exists shard_1.dt64_03222; +drop table if exists distr_03222_dt64; + +create table shard_0.dt64_03222(id UInt64, dt DateTime64(3)) engine = MergeTree order by id; +create table shard_1.dt64_03222(id UInt64, dt DateTime64(3)) engine = MergeTree order by id; +create table distr_03222_dt64 (id UInt64, dt DateTime64(3)) engine = Distributed(test_cluster_two_shards_different_databases, '', dt64_03222); + +insert into shard_0.dt64_03222 values(1, toDateTime64('1970-01-01 00:00:00.000',3)); +insert into shard_0.dt64_03222 values(2, toDateTime64('1970-01-01 00:00:02.456',3)); +insert into shard_1.dt64_03222 values(3, toDateTime64('1970-01-01 00:00:04.811',3)); +insert into shard_1.dt64_03222 values(4, toDateTime64('1970-01-01 00:10:05',3)); +insert into shard_1.dt64_03222 values(5, toDateTime64(0,3)); + +--Output : 1,5 2,3,4 4 1,2,3,5 0 0 5 +select id, dt from distr_03222_dt64 where dt = (select toDateTime64(0,3)) order by id; +select id, dt from distr_03222_dt64 where dt > (select toDateTime64(0,3)) order by id; +select id, dt from distr_03222_dt64 where dt > (select toDateTime64('1970-01-01 00:10:00.000',3)) order by id; +select id, dt from distr_03222_dt64 where dt < (select toDateTime64(5,3)) order by id; + +select count(*) from distr_03222_dt64 where dt > (select toDateTime64('2024-07-20 00:00:00',3)); +select count(*) from distr_03222_dt64 where dt > (select now()); +select count(*) from distr_03222_dt64 where dt < (select toDateTime64('2004-07-20 00:00:00',3)); + + +drop table if exists shard_0.dt64_03222; +drop table if exists shard_1.dt64_03222; +drop table if exists distr_03222_dt64; + +drop database shard_0; +drop database shard_1; diff --git a/tests/queries/0_stateless/03222_json_squashing.reference b/tests/queries/0_stateless/03222_json_squashing.reference new file mode 100644 index 00000000000..d0c19d8239a --- /dev/null +++ b/tests/queries/0_stateless/03222_json_squashing.reference @@ -0,0 +1,102 @@ +All paths +a +b +c +d +Dynamic paths +a +b +c +d +Shared data paths +All paths +a +b +c +d +e +f +Dynamic paths +a +b +c +d +e +f +Shared data paths +All paths +a +b +c +d +Dynamic 
paths +c +d +Shared data paths +a +b +All paths +a +b +c +d +e +f +Dynamic paths +a +b +Shared data paths +c +d +e +f +All paths +a +b +c +d +e +Dynamic paths +a +e +Shared data paths +b +c +d +All paths +b +c +d +e +Dynamic paths +d +e +Shared data paths +b +c +All paths +b +c +d +e +f +g +Dynamic paths +b +c +Shared data paths +d +e +f +g +All paths +b +d +e +f +Dynamic paths +b +f +Shared data paths +d +e diff --git a/tests/queries/0_stateless/03222_json_squashing.sql b/tests/queries/0_stateless/03222_json_squashing.sql new file mode 100644 index 00000000000..53090c5cb88 --- /dev/null +++ b/tests/queries/0_stateless/03222_json_squashing.sql @@ -0,0 +1,82 @@ +-- Tags: long + +set allow_experimental_json_type = 1; +set max_block_size = 1000; + +drop table if exists test; + +create table test (json JSON) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON, number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON, number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON, '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=2)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=2), number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42}'::JSON(max_dynamic_paths=2), number < 3000, '{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct 
arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00", "g" : "Hello2"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference new file mode 100644 index 00000000000..b6c452ba328 --- /dev/null +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.reference @@ -0,0 +1,21 @@ +1006 +1007 +1008 +1009 +101 +1010 +1011 +1012 +1013 +1014 +--- +100 100 +101 101 +102 102 +103 103 +104 104 +105 105 +106 106 +107 107 +108 108 +109 109 diff --git a/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql new file mode 100644 index 00000000000..6f486f8f0fe --- /dev/null +++ b/tests/queries/0_stateless/03222_parallel_replicas_min_marks_to_read_overflow.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test__fuzz_22 SYNC; + +CREATE TABLE test__fuzz_22 (k Float32, v String) ENGINE = 
ReplicatedMergeTree('/clickhouse/03222/{database}/test__fuzz_22', 'r1') ORDER BY k SETTINGS index_granularity = 1; + +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(10_000); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT v +FROM test__fuzz_22 +ORDER BY v +LIMIT 10, 10 +SETTINGS merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +SELECT '---'; + +SELECT k, v +FROM test__fuzz_22 +ORDER BY k +LIMIT 100, 10 +SETTINGS optimize_read_in_order=1, merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +DROP TABLE test__fuzz_22 SYNC; diff --git a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.reference b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql new file mode 100644 index 00000000000..f3bccc79b3f --- /dev/null +++ b/tests/queries/0_stateless/03223_analyzer_with_cube_fuzz.sql @@ -0,0 +1,29 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`a` Int64, `b` Int64) ENGINE = MergeTree ORDER BY a; +CREATE TABLE t2 (`key` Int32, `val` Int64) ENGINE = MergeTree ORDER BY key; +insert into t1 Select number, number from numbers(100000); +insert into t2 Select number, number from numbers(100000); + + +SELECT + 1 * 1000.0001, + (count(1.) = -2147483647) AND (count(a) = 1.1920928955078125e-7) AND (count(val) = 1048577) AND (sum(val) = ((NULL * 1048576) / -9223372036854775807)) AND (sum(a) = ((9223372036854775806 * 10000000000.) / 1048575)) +FROM +( + SELECT + a, + val + FROM t1 + FULL OUTER JOIN t2 ON (t1.a = t2.key) OR (1 * inf) OR (t1.b = t2.key) +) +GROUP BY '65537' + WITH CUBE +FORMAT Null +SETTINGS max_block_size = 100, join_use_nulls = 1, max_execution_time = 1., max_result_rows = 0, max_result_bytes = 0; -- { serverError TIMEOUT_EXCEEDED } + +DROP TABLE t1; +DROP TABLE t2; diff --git a/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference new file mode 100644 index 00000000000..6c01506e800 --- /dev/null +++ b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.reference @@ -0,0 +1,81 @@ +All paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +Dynamic paths +['b'] +['b'] +['b'] +['b'] +['b'] +[] +[] +[] +[] +[] +Shared data paths +[] +[] +[] +[] +[] +['c'] +['c'] +['c'] +['c'] +['c'] +All paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +['b'] +['b'] +['b'] +['b'] +['b'] +Dynamic paths +['b'] +['b'] +['b'] +['b'] +['b'] +['c'] +['c'] +['c'] +['c'] +['c'] +['b'] +['b'] +['b'] +['b'] +['b'] +Shared data paths +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] +[] diff --git a/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql new file mode 100644 index 00000000000..311eba37772 --- /dev/null +++ b/tests/queries/0_stateless/03223_nested_json_in_shared_data_merges.sql @@ -0,0 +1,26 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into 
test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"aa1" : 42, "aa2" : 42, "aa3" : 42, "aa4" : 42, "aa5" : 42, "aa6" : 42, "aa7" : 42, "aa8" : 42, "a" : [{"c" : 42}]}') from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +insert into test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +drop table test; diff --git a/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.reference b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql new file mode 100644 index 00000000000..984c7fe0db7 --- /dev/null +++ b/tests/queries/0_stateless/03223_parallel_replicas_read_task_size_overflow_bug.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS test__fuzz_22 SYNC; + +CREATE TABLE test__fuzz_22 (k Float32, v String) ENGINE = MergeTree ORDER BY k SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES test__fuzz_22; + +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(1); + +SET allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', + merge_tree_min_rows_for_concurrent_read = 9223372036854775806, merge_tree_min_rows_for_concurrent_read_for_remote_filesystem = 9223372036854775806; + + SELECT v + FROM test__fuzz_22 +ORDER BY v + LIMIT 10, 10 +SETTINGS max_threads = 4 + FORMAT Null; -- { serverError BAD_ARGUMENTS } + +DROP TABLE test__fuzz_22 SYNC; diff --git a/tests/queries/0_stateless/03223_system_tables_set_not_ready.reference b/tests/queries/0_stateless/03223_system_tables_set_not_ready.reference new file mode 100644 index 00000000000..e39523ed4f5 --- /dev/null +++ b/tests/queries/0_stateless/03223_system_tables_set_not_ready.reference @@ -0,0 +1,5 @@ +system.distribution_queue 1 +system.rocksdb 1 +system.databases 1 +system.mutations 1 +system.replication_queue 1 diff --git a/tests/queries/0_stateless/03223_system_tables_set_not_ready.sql b/tests/queries/0_stateless/03223_system_tables_set_not_ready.sql new file mode 100644 index 00000000000..907fa47143c --- /dev/null +++ b/tests/queries/0_stateless/03223_system_tables_set_not_ready.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest +-- Tag no-fasttest -- due to EmbeddedRocksDB + +drop table if exists null; +drop table if exists dist; +create table null as system.one engine=Null; +create table dist as null engine=Distributed(test_cluster_two_shards, 
currentDatabase(), 'null', rand()); +insert into dist settings prefer_localhost_replica=0 values (1); +select 'system.distribution_queue', count() from system.distribution_queue where exists(select 1) and database = currentDatabase(); + +drop table if exists rocksdb; +create table rocksdb (key Int) engine=EmbeddedRocksDB() primary key key; +insert into rocksdb values (1); +select 'system.rocksdb', count()>0 from system.rocksdb where exists(select 1) and database = currentDatabase(); + +select 'system.databases', count() from system.databases where exists(select 1) and database = currentDatabase(); + +drop table if exists mt; +create table mt (key Int) engine=MergeTree() order by key; +alter table mt delete where 1; +select 'system.mutations', count() from system.mutations where exists(select 1) and database = currentDatabase(); + +drop table if exists rep1; +drop table if exists rep2; +create table rep1 (key Int) engine=ReplicatedMergeTree('/{database}/rep', '{table}') order by key; +create table rep2 (key Int) engine=ReplicatedMergeTree('/{database}/rep', '{table}') order by key; +system stop fetches rep2; +insert into rep1 values (1); +system sync replica rep2 pull; +select 'system.replication_queue', count()>0 from system.replication_queue where exists(select 1) and database = currentDatabase(); diff --git a/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference new file mode 100644 index 00000000000..9d58b3a35db --- /dev/null +++ b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.reference @@ -0,0 +1,4 @@ +Array(Nullable(Int64)) true +Int64 false +Array(Nullable(Int64)) false +Int64 false diff --git a/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql new file mode 100644 index 00000000000..c96d67c0d47 --- /dev/null +++ b/tests/queries/0_stateless/03224_json_merges_new_type_in_shared_data.sql @@ -0,0 +1,12 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=1)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select '{"b" : 42}' from numbers(5); +insert into test select '{"a" : 42, "b" : [1, 2, 3]}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +insert into test select '{"b" : 42}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +drop table test; diff --git a/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference new file mode 100644 index 00000000000..b45d9bb97da --- /dev/null +++ b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.reference @@ -0,0 +1,22 @@ +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) true +Int64 false +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Int64 true +None false +Int64 true +None false +Array(JSON(max_dynamic_types=16, max_dynamic_paths=2)) false +Int64 false +['c'] +['d'] +Int64 false +None false +Int64 false +None false 
diff --git a/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql new file mode 100644 index 00000000000..b22b8b4fb75 --- /dev/null +++ b/tests/queries/0_stateless/03224_nested_json_merges_new_type_in_shared_data.sql @@ -0,0 +1,25 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, "a7" : 42, "a8" : 42, "a" : [{"c" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, "a7" : 42, "a8" : 42, "a" : [{"d" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONSharedDataPaths(arrayJoin(json.a[])) as path from test order by path; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +drop table test; diff --git a/tests/queries/0_stateless/03224_tuple_element_identifier.reference b/tests/queries/0_stateless/03224_tuple_element_identifier.reference new file mode 100644 index 00000000000..0fc9e7410c1 --- /dev/null +++ b/tests/queries/0_stateless/03224_tuple_element_identifier.reference @@ -0,0 +1,4 @@ +([('wtf')]) [('wtf')] wtf +([('wtf')]) [('wtf')] wtf +Hello +('Hello') Hello Hello Hello diff --git a/tests/queries/0_stateless/03224_tuple_element_identifier.sql b/tests/queries/0_stateless/03224_tuple_element_identifier.sql new file mode 100644 index 00000000000..2a7fb9a97a3 --- /dev/null +++ b/tests/queries/0_stateless/03224_tuple_element_identifier.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; + +SELECT JSONExtract('{"hello":[{"world":"wtf"}]}', 'Tuple(hello Array(Tuple(world String)))') AS x, + x.hello, x.hello[1].world; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS 
x, + x.hello, x.hello[1].` wow `; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS x, + x.hello, x.hello[1].`wow`; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +SELECT ('Hello' AS world,).world; +SELECT ('Hello' AS world,) AS t, t.world, (t).world, identity(t).world; diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference b/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql new file mode 100644 index 00000000000..398494d56de --- /dev/null +++ b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql @@ -0,0 +1,15 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (s String) engine=MergeTree order by tuple(); +alter table test modify column s JSON; -- { serverError BAD_ARGUMENTS } +drop table test; + +create table test (s Array(String)) engine=MergeTree order by tuple(); +alter table test modify column s Array(JSON); -- { serverError BAD_ARGUMENTS } +drop table test; + +create table test (s Tuple(String, String)) engine=MergeTree order by tuple(); +alter table test modify column s Tuple(JSON, String); -- { serverError BAD_ARGUMENTS } +drop table test; + diff --git a/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.reference b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql new file mode 100644 index 00000000000..720f8670c83 --- /dev/null +++ b/tests/queries/0_stateless/03226_alter_update_dynamic_json_not_supported.sql @@ -0,0 +1,9 @@ +set allow_experimental_dynamic_type = 1; +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (d Dynamic, json JSON) engine=MergeTree order by tuple(); +alter table test update d = 42 where 1; -- {serverError CANNOT_UPDATE_COLUMN} +alter table test update json = '{}' where 1; -- {serverError CANNOT_UPDATE_COLUMN} +drop table test; + diff --git a/tests/queries/0_stateless/03227_json_invalid_regexp.reference b/tests/queries/0_stateless/03227_json_invalid_regexp.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03227_json_invalid_regexp.sql b/tests/queries/0_stateless/03227_json_invalid_regexp.sql new file mode 100644 index 00000000000..d98e2ade29d --- /dev/null +++ b/tests/queries/0_stateless/03227_json_invalid_regexp.sql @@ -0,0 +1,4 @@ +set allow_experimental_json_type = 1; +create table test (json JSON(SKIP REGEXP '[]')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP} +create table test (json JSON(SKIP REGEXP '+')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP}; + diff --git a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference new file mode 100644 index 00000000000..afaaaaa6119 --- /dev/null +++ b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.reference @@ -0,0 +1,56 @@ + +SHOW CREATE TABLE: +CREATE TABLE default.test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ENGINE = 
MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192 +CREATE TABLE default.test +( + `x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), + `y` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192 + +clickhouse-format: +CREATE TABLE test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ORDER BY tuple() +CREATE TABLE test (`x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), `y` String) ORDER BY tuple() + +formatQuery: +CREATE TABLE test +( + `x` Tuple( + a String, + b Array(Tuple( + c Tuple( + e String), + d String))), + `y` String +) +ORDER BY tuple() +CREATE TABLE test +( + `x` Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), + `y` String +) +ORDER BY tuple() diff --git a/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh new file mode 100755 index 00000000000..e5614f9f228 --- /dev/null +++ b/tests/queries/0_stateless/03227_print_pretty_tuples_create_query.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-asan, no-msan, no-tsan +# ^ requires S3 + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +echo +echo "SHOW CREATE TABLE:" +${CLICKHOUSE_CLIENT} --output-format Raw --query " + DROP TABLE IF EXISTS test; + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY (); + SET print_pretty_type_names = 1; + SHOW CREATE TABLE test; + SET print_pretty_type_names = 0; + SHOW CREATE TABLE test; + DROP TABLE test; +" + +echo +echo "clickhouse-format:" +${CLICKHOUSE_FORMAT} --query " + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY () +" +${CLICKHOUSE_FORMAT} --oneline --query " + CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY () +" + +echo +echo "formatQuery:" +${CLICKHOUSE_CLIENT} --output-format Raw --query " + SELECT formatQuery('CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY ()') SETTINGS print_pretty_type_names = 1; + SELECT formatQuery('CREATE TABLE test (x Tuple(a String, b Array(Tuple(c Tuple(e String), d String))), y String) ORDER BY ()') SETTINGS print_pretty_type_names = 0; +" diff --git a/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference new file mode 100644 index 00000000000..2127d396bb3 --- /dev/null +++ b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.reference @@ -0,0 +1,4 @@ +414243 +ABC +A +{"a": \'A\'} diff --git a/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql new file mode 100644 index 00000000000..0c2e7dc582a --- /dev/null +++ b/tests/queries/0_stateless/03227_proper_parsing_of_cast_operator.sql @@ -0,0 +1,6 @@ +SELECT '414243'::String; +SELECT x'414243'::String; +SELECT b'01000001'::String; +SELECT '{"a": \'\x41\'}'::String; +SELECT '{"a": \'\x4\'}'::String; -- { clientError SYNTAX_ERROR } +SELECT '{"a": \'a\x4\'}'::String; -- { clientError SYNTAX_ERROR } diff --git a/tests/queries/0_stateless/data_parquet/68131.parquet b/tests/queries/0_stateless/data_parquet/68131.parquet new file mode 100644 index 00000000000..169f6152003 Binary files /dev/null and 
b/tests/queries/0_stateless/data_parquet/68131.parquet differ diff --git a/tests/queries/0_stateless/replication.lib b/tests/queries/0_stateless/replication.lib index 05651531fba..36309cf0331 100755 --- a/tests/queries/0_stateless/replication.lib +++ b/tests/queries/0_stateless/replication.lib @@ -89,7 +89,7 @@ function check_replication_consistency() # Touch all data to check that it's readable (and trigger PartCheckThread if needed) # it's important to disable prefer warmed unmerged parts because # otherwise it can read non-syncrhonized state of replicas - while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do + while ! $CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_result_rows 0 --max_result_bytes 0 --max_rows_to_read 0 --max_bytes_to_read 0 -q "SELECT * FROM merge(currentDatabase(), '$table_name_prefix') FORMAT Null" 2>/dev/null; do sleep 1; num_tries=$((num_tries+1)) if [ $num_tries -eq 250 ]; then @@ -114,7 +114,8 @@ function check_replication_consistency() # it's important to disable prefer warmed unmerged parts because # otherwise it can read non-syncrhonized state of replicas - res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 -q \ + # also, disable the limit that is set for tests globally + res=$($CLICKHOUSE_CLIENT --prefer_warmed_unmerged_parts_seconds=0 --max_rows_to_read=0 -q \ "SELECT if((countDistinct(data) as c) == 0, 1, c) FROM diff --git a/tests/queries/1_stateful/00067_union_all.sql b/tests/queries/1_stateful/00067_union_all.sql index 2a1d00e975d..9ee14b36b03 100644 --- a/tests/queries/1_stateful/00067_union_all.sql +++ b/tests/queries/1_stateful/00067_union_all.sql @@ -10,4 +10,5 @@ UNION ALL ORDER BY id DESC LIMIT 10 ) -ORDER BY id, event; +ORDER BY id, event +SETTINGS max_rows_to_read = 40_000_000; diff --git a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql index 52f9c46997f..8f18f3740e4 100644 --- a/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql +++ b/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql @@ -1,4 +1,4 @@ -- Tags: shard -SET output_format_write_statistics = 0; +SET output_format_write_statistics = 0, max_rows_to_read = 50_000_000; SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; diff --git a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql index 075c01530c6..f0b249e9af4 100644 --- a/tests/queries/1_stateful/00147_global_in_aggregate_function.sql +++ b/tests/queries/1_stateful/00147_global_in_aggregate_function.sql @@ -1,4 +1,5 @@ -- Tags: global +SET max_rows_to_read = 100_000_000; SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql index 6f910646fb7..16b565985ea 100644 --- a/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql +++ b/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql @@ -1,4 +1,5 @@ -- Tags: 
distributed +SET max_rows_to_read = 100_000_000; SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1; diff --git a/tests/queries/1_stateful/00157_cache_dictionary.sql b/tests/queries/1_stateful/00157_cache_dictionary.sql index 3621ff82126..f1bee538828 100644 --- a/tests/queries/1_stateful/00157_cache_dictionary.sql +++ b/tests/queries/1_stateful/00157_cache_dictionary.sql @@ -9,7 +9,8 @@ ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS storage_policy = 'default'; -INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000; +INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 + SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192, max_insert_threads = 1, max_threads = 1; CREATE DATABASE IF NOT EXISTS db_dict; DROP DICTIONARY IF EXISTS db_dict.cache_hits; diff --git a/tests/queries/1_stateful/00158_cache_dictionary_has.sql b/tests/queries/1_stateful/00158_cache_dictionary_has.sql index 32c109417de..631a7751550 100644 --- a/tests/queries/1_stateful/00158_cache_dictionary_has.sql +++ b/tests/queries/1_stateful/00158_cache_dictionary_has.sql @@ -10,6 +10,8 @@ SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PA LIFETIME(MIN 300 MAX 600) LAYOUT(CACHE(SIZE_IN_CELLS 100 QUERY_WAIT_TIMEOUT_MILLISECONDS 600000)); +SET timeout_before_checking_execution_speed = 300; + SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 1400 == 0 LIMIT 100); SELECT count() from test.hits PREWHERE WatchID % 1400 == 0; @@ -20,4 +22,4 @@ SELECT sum(flag) FROM (SELECT dictHas('db_dict.cache_hits', toUInt64(WatchID)) a SELECT count() from test.hits PREWHERE WatchID % 5 == 0; DROP DICTIONARY IF EXISTS db_dict.cache_hits; -DROP DATABASE IF EXISTS db_dict; +DROP DATABASE IF EXISTS db_dict; diff --git a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql index 7b3f50f8141..184a8edcbcb 100644 --- a/tests/queries/1_stateful/00167_read_bytes_from_fs.sql +++ b/tests/queries/1_stateful/00167_read_bytes_from_fs.sql @@ -1,5 +1,6 @@ -- Tags: no-random-settings +SET max_memory_usage = '10G'; SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40; -- We had a bug which lead to additional compressed data read. test.hits compressed size is about 1.2Gb, but we read more then 3Gb. 
diff --git a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql
index 7068780a1b1..b3e4d749328 100644
--- a/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql
+++ b/tests/queries/1_stateful/00171_grouping_aggregated_transform_bug.sql
@@ -1,4 +1,5 @@
 -- Tags: distributed
+SET max_rows_to_read = '100M';
 SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169;
 SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169;
diff --git a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql
index e73de4b33fb..26e112cff04 100644
--- a/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql
+++ b/tests/queries/1_stateful/00182_simple_squashing_transform_bug.sql
@@ -1,6 +1,7 @@
 -- Tags: global
 set allow_prefetched_read_pool_for_remote_filesystem=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0, max_threads=2, max_block_size=65387;
+set max_rows_to_read = '100M';
 SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits);
 SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits);
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index ce7c666912e..0be6256aa50 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -385,12 +385,26 @@ IntelliJ
 IntelliSense
 InterserverConnection
 InterserverThreads
+IntervalDay
+IntervalHour
+IntervalMicrosecond
+IntervalMillisecond
+IntervalMilliseconds
+IntervalMinute
+IntervalMonth
+IntervalNanosecond
+IntervalQuarter
+IntervalSecond
+IntervalWeek
+IntervalYear
 IsPentagon
 IsResClassIII
 IsValid
 JBOD
 JOINed
 JOINs
+JSONAllPaths
+JSONAllPathsWithTypes
 JSONArrayLength
 JSONAsObject
 JSONAsString
@@ -405,6 +419,8 @@ JSONCompactStrings
 JSONCompactStringsEachRow
 JSONCompactStringsEachRowWithNames
 JSONCompactStringsEachRowWithNamesAndTypes
+JSONDynamicPaths
+JSONDynamicPathsWithTypes
 JSONEachRow
 JSONEachRowWithProgress
 JSONExtract
@@ -424,6 +440,8 @@ JSONObjectEachRow
 JSONStrings
 JSONStringsEachRow
 JSONStringsEachRowWithProgress
+JSONSharedDataPaths
+JSONSharedDataPathsWithTypes
 JSONType
 JSONs
 Jaeger
@@ -2121,8 +2139,10 @@ namespace
 namespaces
 natively
 nats
+ness
 nestjs
 netloc
+newjson
 ngram
 ngramDistance
 ngramDistanceCaseInsensitive
@@ -2732,6 +2752,17 @@ toISOWeek
 toISOYear
 toInt
 toInterval
+toIntervalDay
+toIntervalHour
+toIntervalMicrosecond
+toIntervalMillisecond
+toIntervalMinute
+toIntervalMonth
+toIntervalNanosecond
+toIntervalQuarter
+toIntervalSecond
+toIntervalWeek
+toIntervalYear
 toJSONString
 toLastDayOfMonth
 toLastDayOfWeek
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 3c959617d02..46593e85e45 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -467,3 +467,7 @@
 find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | grep -vP $EXCLUDE_DIRS | xargs grep -F -i 'ErrorCodes::LOGICAL_ERROR, "Logical error:' && echo "If an exception has LOGICAL_ERROR code, there is no need to include the text 'Logical error' in the exception message, because then the phrase 'Logical error' will be printed twice."
+
+PATTERN="allow_";
+DIFF=$(comm -3 <(grep -o "\b$PATTERN\w*\b" $ROOT_PATH/src/Core/Settings.h | sort -u) <(grep -o -h "\b$PATTERN\w*\b" $ROOT_PATH/src/Databases/enableAllExperimentalSettings.cpp $ROOT_PATH/utils/check-style/experimental_settings_ignore.txt | sort -u));
+[ -n "$DIFF" ] && echo "$DIFF" && echo "^^ Detected 'allow_*' settings that might need to be included in src/Databases/enableAllExperimentalSettings.cpp" && echo "Alternatively, consider adding an exception to utils/check-style/experimental_settings_ignore.txt"
diff --git a/utils/check-style/experimental_settings_ignore.txt b/utils/check-style/experimental_settings_ignore.txt
new file mode 100644
index 00000000000..94c46cf562e
--- /dev/null
+++ b/utils/check-style/experimental_settings_ignore.txt
@@ -0,0 +1,48 @@
+allow_aggregate_partitions_independently
+allow_archive_path_syntax
+allow_asynchronous_read_from_io_pool_for_merge_tree
+allow_changing_replica_until_first_data_packet
+allow_custom_error_code_in_throwif
+allow_ddl
+allow_deprecated_database_ordinary
+allow_deprecated_snowflake_conversion_functions
+allow_distributed_ddl
+allow_drop_detached
+allow_execute_multiif_columnar
+allow_experimental_alter_materialized_view_structure
+allow_experimental_analyzer
+allow_experimental_annoy_index
+allow_experimental_database_atomic
+allow_experimental_database_materialized_mysql
+allow_experimental_database_materialized_postgresql
+allow_experimental_database_replicated
+allow_experimental_join_condition
+allow_experimental_kafka_offsets_storage_in_keeper
+allow_experimental_lightweight_delete
+allow_experimental_materialized_postgresql_table
+allow_experimental_parallel_reading_from_replicas
+allow_experimental_projection_optimization
+allow_experimental_query_cache
+allow_experimental_query_deduplication
+allow_experimental_refreshable_materialized_view
+allow_experimental_shared_merge_tree
+allow_experimental_statistic
+allow_experimental_statistics
+allow_experimental_time_series_table
+allow_experimental_undrop_table_query
+allow_experimental_usearch_index
+allow_get_client_http_header
+allow_introspection_functions
+allow_materialized_view_with_bad_select
+allow_named_collection_override_by_default
+allow_non_metadata_alters
+allow_nonconst_timezone_arguments
+allow_nondeterministic_mutations
+allow_nondeterministic_optimize_skip_unused_shards
+allow_prefetched_read_pool_for_local_filesystem
+allow_prefetched_read_pool_for_remote_filesystem
+allow_push_predicate_when_subquery_contains_with
+allow_settings_after_format_in_insert
+allow_statistic_optimize
+allow_statistics_optimize
+allow_unrestricted_reads_from_keeper
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index f46353277e2..8556375d543 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,3 +1,4 @@
+v24.8.1.2684-lts 2024-08-21
 v24.7.3.42-stable 2024-08-08
 v24.7.2.13-stable 2024-08-01
 v24.7.1.2915-stable 2024-07-30
@@ -13,6 +14,8 @@ v24.4.4.113-stable 2024-08-02
 v24.4.3.25-stable 2024-06-14
 v24.4.2.141-stable 2024-06-07
 v24.4.1.2088-stable 2024-05-01
+v24.3.8.13-lts 2024-08-20
+v24.3.7.30-lts 2024-08-14
 v24.3.6.48-lts 2024-08-02
 v24.3.5.46-lts 2024-07-03
 v24.3.4.147-lts 2024-06-13