Compare commits

...

316 Commits

Author SHA1 Message Date
Arthur Passos
50b3d3172c add no fast test 2024-08-27 16:48:10 -03:00
Arthur Passos
c298b20ba9 extract some tests into sql as an attempt to make them run faster for flaky check 2024-08-27 16:17:35 -03:00
Arthur Passos
0b29aef1a0 remove extra dot from exception message 2024-08-27 15:25:41 -03:00
Arthur Passos
a0ab22e031 style 2024-08-27 13:01:10 -03:00
Arthur Passos
6f806124a3 Merge branch 'master' into multi_auth_methods 2024-08-27 12:24:53 -03:00
Arthur Passos
a65d175a81 change parsing logic a bit 2024-08-27 12:23:42 -03:00
vdimir
bb22736bc3
Merge pull request #68867 from ucasfl/url-engine
Add virtual column _headers for url table engine
2024-08-27 13:27:19 +00:00
János Benjamin Antal
964641a28c
Merge pull request #67583 from depressed-pho/fix-fromModifiedJulianDay
Fix the upper bound of function fromModifiedJulianDay()
2024-08-27 13:20:57 +00:00
jsc0218
6584790ef4
Merge pull request #68752 from jsc0218/Fix02477Timeout
Fix 2477 timeout
2024-08-27 13:06:38 +00:00
Han Fei
a0e9412c72
Merge pull request #68499 from denis-hananein/fix-68239-sample-n
Fix 68239 sample n
2024-08-27 13:05:51 +00:00
Daniil Ivanik
e0dc32bc61
Merge pull request #68210 from ClickHouse/divanik/add_local_and_azure_iceberg_support
Support partial Iceberg reading in azure and local storages
2024-08-27 11:52:43 +00:00
Alexander Tokmakov
60a0ea5746
Merge pull request #68366 from ClickHouse/check_merge_entries
Check that merge entries are valid
2024-08-27 11:17:11 +00:00
Max K.
cd2e66ba58
Merge pull request #68931 from ClickHouse/ci_job_rerun_fix
CI: Fix job rerun check
2024-08-27 11:17:04 +00:00
Igor Nikonov
98562b0d15
Merge pull request #67700 from ClickHouse/asan-invalid-shared-context-access
Fix(asan): access destroyed shared context from handleCrash()
2024-08-27 10:55:56 +00:00
Raúl Marín
e3ef11e505
Merge pull request #67983 from Algunenano/flaky_win_view
Fix window view missing blocks due to slow flush to view
2024-08-27 10:35:09 +00:00
Kruglov Pavel
e13e537a06
Merge pull request #68128 from RodolpheDuge/odbc_http_retry_from_configuration
odbc: get http_max_tries  from server configuration
2024-08-27 10:32:40 +00:00
robot-clickhouse
0ad66778da Automatic style fix 2024-08-27 10:19:30 +00:00
alesapin
df44c2a4ff
Merge pull request #68897 from ClickHouse/revert-59173-fix_prewhere_without_columns
Revert "Fix prewhere without columns and without adaptive index granularity (almost w/o anything)"
2024-08-27 10:15:35 +00:00
Max Kainov
94f168e5ef CI: Fix job rerun check 2024-08-27 12:13:21 +02:00
Raúl Marín
d0c36c613d
Merge pull request #68048 from Algunenano/no-parallel-6
Remove some no-parallel tags from tests (Part 6)
2024-08-27 09:16:13 +00:00
jsc0218
6289c65e02
Merge pull request #62364 from cangyin/fix-projection-merge
Rebuild projection for merges that reduces rows
2024-08-27 00:59:42 +00:00
jsc0218
033b9cc28c
Merge pull request #68835 from jsc0218/FixFuncTypo
Fix Function Typo
2024-08-26 21:06:07 +00:00
Alexander Tokmakov
936bbe7d0d handle trash parts correctly 2024-08-26 20:02:03 +02:00
Kseniia Sumarokova
8b2db6276c
Merge pull request #68836 from ClickHouse/fix-delta-lake-bug-in-schema-parsing
Fix complex types metadata parsing in DeltaLake
2024-08-26 17:44:22 +00:00
Raúl Marín
b64b462468 Merge remote-tracking branch 'blessed/master' into no-parallel-6 2024-08-26 17:53:44 +02:00
Raúl Marín
e8d9fbdd1f Merge remote-tracking branch 'blessed/master' into flaky_win_view 2024-08-26 17:49:22 +02:00
Alexander Gololobov
c27513e540
Revert "Fix prewhere without columns and without adaptive index granularity (almost w/o anything)" 2024-08-26 17:38:07 +02:00
vdimir
e7d17573e1
Add assert src/Storages/StorageURL.cpp 2024-08-26 16:55:45 +02:00
Igor Nikonov
3bdad9baa5
Merge branch 'master' into asan-invalid-shared-context-access 2024-08-26 16:20:49 +02:00
jsc0218
b381c9dd84 Merge remote-tracking branch 'upstream/master' into fix-projection-merge 2024-08-26 13:06:58 +00:00
jsc0218
3cd11afa61 Merge remote-tracking branch 'origin/master' into Fix02477Timeout 2024-08-26 12:59:01 +00:00
jsc0218
2a9d61efec Merge remote-tracking branch 'origin/master' into FixFuncTypo 2024-08-26 12:56:09 +00:00
Konstantin Bogdanov
1cdccd527f
Merge pull request #68639 from m7kss1/ripedmd-160
Add RIPEMD160 function
2024-08-26 12:06:18 +00:00
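A minimal usage sketch for the `RIPEMD160` function added in PR #68639; the `hex` wrapper and the example string follow the docs committed in the same PR and are treated here as assumptions rather than a definitive reference:

```sql
SELECT hex(RIPEMD160('The quick brown fox jumps over the lazy dog'));
```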
Yarik Briukhovetskyi
6e8fed3c62
Merge pull request #68734 from yariks5s/fix_test_00080_show_tables_and_system_tables
Fix flaky `00080_show_tables_and_system_tables`
2024-08-26 12:04:48 +00:00
Kruglov Pavel
e361417ff6
Merge pull request #68298 from Avogar/fix-nullable-schema-inference
Fix using schema_inference_make_columns_nullable=0
2024-08-26 11:52:34 +00:00
Kruglov Pavel
0837a51313
Merge pull request #68632 from Avogar/fix-dynamic-tests
Disable min_bytes_to_use_direct_io in some tests with Dynamic/JSON subcolumns because it's broken
2024-08-26 11:52:14 +00:00
Kruglov Pavel
76493b31b6
Merge pull request #68653 from ClickHouse/Avogar-patch-6
Increase connectTimeoutMs IMDS connection timeout to 50ms to avoid failures in CI
2024-08-26 11:50:41 +00:00
Kruglov Pavel
e08964c35f
Merge pull request #68665 from ClickHouse/Avogar-patch-7
Fix flaky test test_distributed_replica_max_ignored_errors
2024-08-26 11:50:31 +00:00
Kruglov Pavel
72c3b0212d
Merge pull request #68437 from bigo-sg/devirtualize_schema_reader
Try to devirtualize format reader in RowInputFormatWithNamesAndTypes
2024-08-26 11:43:54 +00:00
Kseniia Sumarokova
6e584dd541
Fix test 2024-08-26 11:33:08 +02:00
divanik
3581eb34e7 Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-26 09:04:13 +00:00
Yarik Briukhovetskyi
eb71d3be42
Merge branch 'ClickHouse:master' into fix_test_00080_show_tables_and_system_tables 2024-08-26 00:07:17 +02:00
Nikita Mikhaylov
674cddc969
Merge pull request #68848 from amosbird/fix-68618
Fix empty tuple in array
2024-08-25 20:23:43 +00:00
Nikita Mikhaylov
49f6112e4f
Merge pull request #68866 from ClickHouse/fix-ssl-handshake-error-process
Fix ssl handshake error processing
2024-08-25 20:23:24 +00:00
Nikita Mikhaylov
5c1cfeec4c
Merge pull request #68730 from ClickHouse/replxx-custom-descriptors
Bump Replxx to support custom descriptors
2024-08-25 17:09:36 +00:00
Nikita Mikhaylov
2888b01d64
Merge pull request #68697 from Blargian/patch-8
[Docs] fix typo and formatting in geohash page
2024-08-25 13:35:29 +00:00
Nikita Mikhaylov
a075842b90
Merge pull request #68772 from ClickHouse/reduce-time-of-01395_limit_more_cases
Split test case and reduce number of random runs to reduce the time necessary to run the test
2024-08-25 13:33:22 +00:00
Nikita Mikhaylov
f38f95a144
Update base/poco/NetSSL_OpenSSL/src/SecureSocketImpl.cpp 2024-08-25 14:26:21 +02:00
Nikita Mikhaylov
69bb1df642 Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-68618 2024-08-25 14:24:20 +02:00
flynn
e1e692a968 Fix and update docs 2024-08-25 08:00:16 +00:00
flynn
590cf84a83 Fix conflict 2024-08-25 07:54:11 +00:00
flynn
e4aceed36a Add virtual column _headers for url table engine 2024-08-25 07:49:30 +00:00
Yakov Olkhovskiy
f7cc3e9c59
postpone SSL handshake 2024-08-25 00:13:12 -04:00
Yakov Olkhovskiy
d163880004
process possible SSL error on connection reset 2024-08-25 00:11:31 -04:00
János Benjamin Antal
91383aa87c Merge remote-tracking branch 'origin/master' into reduce-time-of-01395_limit_more_cases 2024-08-24 22:19:33 +00:00
Nikita Mikhaylov
385c8127cf Fix FreeBSD build 2024-08-24 16:01:03 +02:00
Nikita Mikhaylov
01523cce2a Bump replxx 2024-08-24 16:01:03 +02:00
Nikita Mikhaylov
78c175225b Done 2024-08-24 16:01:03 +02:00
Nikita Mikhaylov
e7054029c4
Merge pull request #68843 from ClickHouse/fix-float
Fix flaky `02932_analyzer_rewrite_sum_column_and_constant `
2024-08-24 13:40:53 +00:00
Han Fei
b578d1af1c
Merge pull request #68820 from hanfei1991/hanfei/fix-logical-err-stats
fix logical err of modify statistics
2024-08-24 13:39:48 +00:00
Nikita Mikhaylov
114499526e
Merge pull request #68845 from tbragin/patch-13
Update README.md - Add Austin meetup
2024-08-24 13:37:34 +00:00
Anton Popov
7cfe1ec25c
Merge pull request #68842 from CurtizJ/fix-test-03221-mutation
Fix test `03221_mutation_analyzer_skip_part`
2024-08-24 13:32:41 +00:00
Nikita Mikhaylov
064a072146
Merge pull request #68841 from CurtizJ/fix-test-03228
Fix test `03228_virtual_column_merge_dist`
2024-08-24 13:32:01 +00:00
Amos Bird
e2aa953e70
Fix empty tuple in array 2024-08-24 20:45:10 +08:00
Kruglov Pavel
a34191f3b1
Merge pull request #68681 from Avogar/fix-variant-permutation
Fix ColumnVariant permutation
2024-08-24 11:56:23 +00:00
Kruglov Pavel
d477bef82d
Merge pull request #68686 from Avogar/fix-structure-comparison-json
Fix structure comparison between 2 JSON columns
2024-08-24 11:56:12 +00:00
Kruglov Pavel
293821a186
Merge pull request #68802 from Avogar/consistent-dynamic-structure
Make dynamic structure selection more consistent
2024-08-24 11:53:17 +00:00
Kruglov Pavel
906b7aebc6
Merge pull request #68824 from Avogar/fix-dynamic-subcolumns-resolutuon
Fix resolving dynamic subcolumns from subqueries in analyzer
2024-08-24 11:51:09 +00:00
Han Fei
5fe151529a fix flaky test, although it is not actually flaky 2024-08-24 07:33:18 +02:00
Tanya Bragin
080b8f74be
Update README.md
Add Austin meetup
2024-08-23 15:50:56 -07:00
Anton Popov
0a35b111ff fix test 03221_mutation_analyzer_skip_part 2024-08-23 21:40:54 +00:00
Han Fei
6fb8f2b4ee fix black 2024-08-23 23:19:03 +02:00
Han Fei
0f265ce33d address comments 2024-08-23 23:13:53 +02:00
Nikita Mikhaylov
a824217193 Done 2024-08-23 20:29:04 +00:00
Anton Popov
80504e7b9b fix test 03228_virtual_column_merge_dist 2024-08-23 19:07:25 +00:00
Alexander Tokmakov
ed5114b94c Merge branch 'master' into check_merge_entries 2024-08-23 20:06:42 +02:00
Alexander Tokmakov
d3f3bc3565
Merge pull request #68629 from ClickHouse/revert-68515-fix-01079_bad_alters_zookeeper_long
Fix test `01079_bad_alters_zookeeper_long`
2024-08-23 18:05:03 +00:00
Yarik Briukhovetskyi
55116da80d
Merge pull request #68606 from yariks5s/remove_initial_underscores_hive
Prioritizing of virtual columns in hive partitioning
2024-08-23 16:32:13 +00:00
vdimir
642657d02c
Merge pull request #68612 from leonkozlowski/docs/fix-merge-tree-primary-key-docs
patch: fix reference to sorting key in primary key docs
2024-08-23 16:29:25 +00:00
kssenii
eb94847ed9 Fix 2024-08-23 18:14:02 +02:00
jsc0218
3278287779 fix 2024-08-23 15:26:45 +00:00
avogar
7aabd7d2fd Fix resolving dynamic subcolumns from subqueries in analyzer 2024-08-23 15:11:51 +00:00
Han Fei
61fa4e7a47 fix logical err of modify statistics 2024-08-23 16:38:48 +02:00
jsc0218
eb25e064ac reduce amount 2024-08-23 13:53:41 +00:00
avogar
5d6b861ff0 Fix index with limit=0 2024-08-23 13:49:36 +00:00
Yarik Briukhovetskyi
15f04fa313
Merge branch 'ClickHouse:master' into fix_test_00080_show_tables_and_system_tables 2024-08-23 15:29:52 +02:00
alesapin
c0b36c946d
Merge pull request #68715 from ClickHouse/fix_flaky_test_222
Turn off fault injection for insert in `01396_inactive_replica_cleanup_nodes_zookeeper`
2024-08-23 13:06:55 +00:00
Kruglov Pavel
2b20b2d4de
Update src/Columns/ColumnDynamic.cpp
Co-authored-by: Dmitry Novik <mrnovikd@gmail.com>
2024-08-23 15:02:43 +02:00
Kruglov Pavel
6f5210644b
Update src/Columns/ColumnObject.cpp
Co-authored-by: Alexander Gololobov <davenger@clickhouse.com>
2024-08-23 14:43:09 +02:00
Vitaly Baranov
4d5ac30a87
Merge pull request #67748 from vitlibar/fix-role-cache-expiration
Fix expiration in RoleCache
2024-08-23 12:29:56 +00:00
Kseniia Sumarokova
30125b5a89
Merge pull request #68594 from ClickHouse/kssenii-patch-12
Update 02995_index_7.sh
2024-08-23 12:25:22 +00:00
avogar
0bdb18e4af Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-dynamic-tests 2024-08-23 12:21:41 +00:00
Kruglov Pavel
1a479b43fd
Merge pull request #68688 from ClickHouse/Avogar-patch-10
Fix Upgrade Check: move some settings to 24.9 section
2024-08-23 12:17:43 +00:00
avogar
1165ae756d Make dynamic structure selection more consistent 2024-08-23 12:16:16 +00:00
Miсhael Stetsyuk
a4ee666ec5
Merge pull request #68733 from ClickHouse/write-metadata-to-disk-and-zk-in-same-format
Write metadata to disk and keeper in the same format
2024-08-23 10:51:15 +00:00
Nikita Mikhaylov
c9c537e3ab
Merge pull request #68764 from tbragin/patch-12
Update README.md
2024-08-23 10:42:01 +00:00
Sema Checherinda
573d83ff97
Merge pull request #68728 from ClickHouse/chesema-around-logs
fix shutdown for PeriodicLog
2024-08-23 10:34:35 +00:00
Max K.
b5406a4be1
Merge pull request #68751 from ClickHouse/ci_rerun_check_upd
CI: Make job rerun possible if triggered manually
2024-08-23 10:20:14 +00:00
alesapin
44d3a94c61 Merge remote-tracking branch 'origin/master' into fix_flaky_test_222 2024-08-23 12:04:06 +02:00
Yarik Briukhovetskyi
b0894bffe6
change test file location 2024-08-23 12:01:17 +02:00
Maxim Dergousov
8cc5d766b5 small cosmetic changes in docs 2024-08-23 12:52:55 +03:00
Maxim Dergousov
8c4329964f small cosmetic changes in docs 2024-08-23 12:50:18 +03:00
alesapin
20c62dc978
Merge pull request #68737 from ClickHouse/fix_flaky_test_989
Fix flaky test 00989_parallel_parts_loading
2024-08-23 09:33:21 +00:00
Max K.
894bbbf021
Merge pull request #68654 from ClickHouse/ci_fix_sqllogic
CI: Disable SQLLogic job
2024-08-23 09:29:02 +00:00
János Benjamin Antal
6ba686d251 Split test case and reduce number of random runs to reduce time necessary to run the test 2024-08-23 09:20:40 +00:00
Robert Schulze
df361bd5ec
Merge pull request #68447 from rschu1ze/remove-multiquery
Remove obsolete `-n` / `--multiquery` from tests
2024-08-23 09:12:58 +00:00
Max K.
dff93d2c80
Merge pull request #68712 from ClickHouse/ci_fix_stress_test
CI: Stress test fix
2024-08-23 09:07:00 +00:00
Robert Schulze
6775e60331
Merge pull request #68731 from aiven-sal/aiven-sal/fhsip2
Fix regression in `sipHash(64/128)Keyed`
2024-08-23 09:02:03 +00:00
robot-clickhouse
b22423068b
Merge pull request #68768 from ClickHouse/auto/v24.7.4.51-stable
Update version_date.tsv and changelog after v24.7.4.51-stable
2024-08-23 08:32:58 +00:00
Kseniia Sumarokova
d50a9cdec1
Merge branch 'master' into kssenii-patch-12 2024-08-23 10:20:49 +02:00
robot-clickhouse
eec720dab6 Update version_date.tsv and changelogs after v24.7.4.51-stable 2024-08-23 08:05:27 +00:00
robot-clickhouse
0fd3694373
Merge pull request #68767 from ClickHouse/auto/v24.6.4.42-stable
Update version_date.tsv and changelog after v24.6.4.42-stable
2024-08-23 08:04:08 +00:00
robot-clickhouse
e1a7bd9163 Update version_date.tsv and changelogs after v24.6.4.42-stable 2024-08-23 07:37:32 +00:00
robot-clickhouse
b094f5344f
Merge pull request #68766 from ClickHouse/auto/v24.5.6.45-stable
Update version_date.tsv and changelog after v24.5.6.45-stable
2024-08-23 07:33:53 +00:00
robot-clickhouse
e538080665 Update version_date.tsv and changelogs after v24.5.6.45-stable 2024-08-23 07:09:03 +00:00
Tanya Bragin
60e4bcbbf0
Update README.md
Update Raleigh meetup link
2024-08-22 20:45:28 -07:00
jsc0218
073ef13e36 fix 2024-08-23 01:02:46 +00:00
Max K.
131d01922f
Merge pull request #68750 from ClickHouse/ci_force_debug_build_on_release_branch
CI: Force package_debug build on release branches
2024-08-23 01:01:43 +00:00
Max Kainov
f5739dfe06 CI: Make job rerun possible if triggered manually 2024-08-23 02:58:50 +02:00
Max Kainov
4c790999eb CI: Force package_debug build on release branches 2024-08-23 02:18:26 +02:00
Yarik Briukhovetskyi
13680b3c5f
Merge branch 'ClickHouse:master' into fix_test_00080_show_tables_and_system_tables 2024-08-23 01:42:16 +02:00
Anton Popov
012cf0763f
Merge pull request #68672 from CurtizJ/return-back-virtual-column
Return back virtual columns to distributed tables
2024-08-22 23:17:59 +00:00
Yarik Briukhovetskyi
dc862b1411
fix test 2024-08-22 23:40:18 +02:00
Nikita Mikhaylov
5bb9ddea2b
Merge pull request #68725 from ClickHouse/Avogar-patch-8
Fix flaky check
2024-08-22 21:24:37 +00:00
Konstantin Bogdanov
69f6ea5083
Update docs/en/sql-reference/functions/hash-functions.md 2024-08-22 22:07:02 +02:00
robot-clickhouse
e9ff092d0b
Merge pull request #68745 from ClickHouse/auto/v24.5.5.41-stable
Update version_date.tsv and changelog after v24.5.5.41-stable
2024-08-22 19:35:21 +00:00
Max Kainov
4200b3d5cb CI: Stress test fix 2024-08-22 21:19:56 +02:00
robot-clickhouse
f89193fa41 Update version_date.tsv and changelogs after v24.5.5.41-stable 2024-08-22 19:12:19 +00:00
Kruglov Pavel
f8e08967af
Merge pull request #68736 from mneedham/patch-5
Update newjson.md
2024-08-22 18:38:20 +00:00
Yarik Briukhovetskyi
e39d786b1b
Merge branch 'ClickHouse:master' into fix_test_00080_show_tables_and_system_tables 2024-08-22 20:31:09 +02:00
Dergousov
a2ff8e4384 fix: correct return type inconsistencies in docs 2024-08-22 20:49:56 +03:00
Konstantin Bogdanov
ef9fbe3006
fix: disable running test in fasttest due to missing OpenSSL 2024-08-22 19:44:05 +02:00
Konstantin Bogdanov
2a32207e9e
fix: wrap in conditional preprocessor directives 2024-08-22 19:08:05 +02:00
Yarik Briukhovetskyi
06c46ee75b
add one more test 2024-08-22 18:56:50 +02:00
robot-clickhouse
8921eec672
Merge pull request #68740 from ClickHouse/auto/v24.8.2.3-lts
Update version_date.tsv and changelog after v24.8.2.3-lts
2024-08-22 16:55:37 +00:00
Tyler Hannan
55252b635f
Merge pull request #68738 from ClickHouse/tylerhannan-patch-1
Update README.md
2024-08-22 16:45:54 +00:00
robot-clickhouse
a780f64cb5
Merge pull request #68735 from ClickHouse/auto/v24.7.3.47-stable
Update version_date.tsv and changelog after v24.7.3.47-stable
2024-08-22 16:40:34 +00:00
Yarik Briukhovetskyi
3ae4370ce5
Merge pull request #68692 from SignFinder/patch-1
The English version of the page has up-to-date information, but the Russian page still shows deprecated content.
2024-08-22 16:37:55 +00:00
Max Kainov
fa453c3664 Disable SqlLogic job 2024-08-22 18:20:33 +02:00
robot-clickhouse
4264fbc037 Update version_date.tsv and changelogs after v24.8.2.3-lts 2024-08-22 16:16:47 +00:00
Tyler Hannan
1692360233
Update README.md
26 and 266 are different
2024-08-22 18:12:38 +02:00
Sema Checherinda
e7b89537bf fix style 2024-08-22 18:02:42 +02:00
alesapin
52cdd88eb6 Better comment 2024-08-22 17:59:10 +02:00
Tyler Hannan
0bd8ebf626
Update README.md
adding community call. resolving recent recordings
2024-08-22 17:58:56 +02:00
alesapin
9c0e1df166 Fix flaky test 00989_parallel_parts_loading 2024-08-22 17:58:15 +02:00
Yarik Briukhovetskyi
28fbd8a4ef
fix stateless tests 2024-08-22 17:56:16 +02:00
Mark Needham
7c3a013d56
Update newjson.md 2024-08-22 16:53:30 +01:00
Yarik Briukhovetskyi
980b02bfd6
fix compatibility with en version 2024-08-22 17:48:57 +02:00
Yarik Briukhovetskyi
5f61e19340
small fixes 2024-08-22 17:46:47 +02:00
robot-clickhouse
c63cec756f
Merge pull request #68732 from ClickHouse/auto/v24.6.3.38-stable
Update version_date.tsv and changelog after v24.6.3.38-stable
2024-08-22 15:44:33 +00:00
Nikolay Degterinsky
1547dd2bde
Merge pull request #68645 from zghong/fix-invalid-char-in-replica_name
Fix invalid characters in replica_name
2024-08-22 15:42:39 +00:00
robot-clickhouse
51fbc629c6 Update version_date.tsv and changelogs after v24.7.3.47-stable 2024-08-22 15:42:17 +00:00
robot-clickhouse
9d75415090
Merge pull request #68729 from ClickHouse/auto/v24.5.5.41-stable
Update version_date.tsv and changelog after v24.5.5.41-stable
2024-08-22 15:24:27 +00:00
Yarik Briukhovetskyi
837f2bba8a
init 2024-08-22 17:23:45 +02:00
Sema Checherinda
859d2bfe27 move stopFlushThread to SystemLogBase 2024-08-22 17:18:06 +02:00
Michael Stetsyuk
0b9c24f31d write metadata to disk and keeper in the same format 2024-08-22 15:13:42 +00:00
robot-clickhouse
0dc18247df Update version_date.tsv and changelogs after v24.6.3.38-stable 2024-08-22 15:10:24 +00:00
Tyler Hannan
a541b106dd
Merge pull request #68723 from tbragin/patch-11
Update README.md - Meetups update
2024-08-22 14:52:09 +00:00
Salvatore Mesoraca
a93d191980 Fix typo in test case 2024-08-22 16:43:38 +02:00
Salvatore Mesoraca
1ea0163dfe Fix issue with maps with arrays as keys 2024-08-22 16:42:14 +02:00
robot-clickhouse
5340ac5fbc Update version_date.tsv and changelogs after v24.5.5.41-stable 2024-08-22 14:39:19 +00:00
Sema Checherinda
a9e793532a fix shutdown for PeriodicLog 2024-08-22 16:34:14 +02:00
Kruglov Pavel
ce33943b43
Fix flaky check 2024-08-22 15:50:59 +02:00
jsc0218
216f75cd0a Merge remote-tracking branch 'upstream/master' into fix-projection-merge 2024-08-22 13:44:12 +00:00
Anton Popov
91e65feaae fix virtual columns in Merge engine 2024-08-22 13:42:30 +00:00
Tanya Bragin
add4718634
Update README.md - Meetups update
Fixed one meetup location; Added more meetups
2024-08-22 06:37:27 -07:00
Yarik Briukhovetskyi
8d14d85230
fix black 2024-08-22 15:24:33 +02:00
Yarik Briukhovetskyi
b3f084459f
fix black 2024-08-22 14:53:53 +02:00
alesapin
7a740819b9 Accidentally deleted comment 2024-08-22 14:53:15 +02:00
alesapin
54dd3afd49 Turn off fault injection for insert in 01396_inactive_replica_cleanup_nodes_zookeeper 2024-08-22 14:52:17 +02:00
Vitaly Baranov
7ef5c366e8 Fix expiration in RoleCache. 2024-08-22 13:56:55 +02:00
Vitaly Baranov
664e9b3db9 Add one more test. 2024-08-22 13:56:43 +02:00
Vitaly Baranov
84467077b8 Fix test for role expiration in RoleCache. 2024-08-22 13:55:20 +02:00
Shaun Struwig
62054cae66
Update geohash.md 2024-08-22 13:49:16 +02:00
Yarik Briukhovetskyi
95f45d2eaf
try to fix tests 2024-08-22 13:20:04 +02:00
avogar
8a669f5e9d Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-dynamic-tests 2024-08-22 11:05:44 +00:00
avogar
e73e8e7a08 Merge branch 'master' of github.com:ClickHouse/ClickHouse into Avogar-patch-10 2024-08-22 10:43:23 +00:00
Kseniia Sumarokova
c6a197aed3
Merge branch 'master' into kssenii-patch-12 2024-08-22 11:59:28 +02:00
Shaun Struwig
6466f374e0
Update geohash.md 2024-08-22 11:29:33 +02:00
Alexey
be4439e3ec
Update install.md
Added correct commands for the Russian version of the instructions for installation from deb packages
2024-08-22 10:30:48 +03:00
李扬
9a35f0bf15
Merge branch 'ClickHouse:master' into devirtualize_schema_reader 2024-08-22 10:59:31 +08:00
Konstantin Bogdanov
54caf1f84e
fix: wrap in conditional preprocessor directives 2024-08-22 01:20:46 +02:00
Konstantin Bogdanov
0f3c7ae8c2
feat: add docs 2024-08-22 01:15:16 +02:00
Yarik Briukhovetskyi
8cf6323125
fix black 2024-08-22 00:48:29 +02:00
Kruglov Pavel
8a89cd31a1
Fix Upgrade Check: move some settings to 24.9 section 2024-08-22 00:29:32 +02:00
Yarik Briukhovetskyi
2f6ad1271c
fix tests + exception 2024-08-22 00:27:02 +02:00
avogar
bff252ea73 Fix test 2024-08-21 21:45:26 +00:00
avogar
ca880ccdee Fix structure comparison between 2 JSON columns 2024-08-21 20:47:48 +00:00
Max Kainov
6e5465ae51 CI: SQLLogic job fix 2024-08-21 22:31:48 +02:00
Dergousov
7f15f61426 feat: add docs 2024-08-21 22:46:55 +03:00
Dergousov
dfe0beb53b feat: add docs 2024-08-21 22:46:29 +03:00
avogar
38f9ef6bc9 Fix ColumnVariant permutation 2024-08-21 19:08:07 +00:00
Dergousov
74d8971432 fix: use OpenSSL RIPEMD160 impl 2024-08-21 22:06:52 +03:00
Yarik Briukhovetskyi
a52eff299e
fix tests 2024-08-21 19:43:45 +02:00
yariks5s
2e58ac5611 build fix 2024-08-21 16:30:42 +00:00
Anton Popov
e87de3cfcd return back virtual columns to distributed tables 2024-08-21 15:19:29 +00:00
Yarik Briukhovetskyi
5965297d8b
add accidentally removed virtual column 2024-08-21 16:35:39 +02:00
Yarik Briukhovetskyi
1afd3a7c3a
give priority to parsed columns over storage columns 2024-08-21 16:24:43 +02:00
Kruglov Pavel
3fd50ed856
Fix flaky test test_distributed_replica_max_ignored_errors 2024-08-21 16:23:37 +02:00
李扬
712c9855d2
Merge branch 'ClickHouse:master' into devirtualize_schema_reader 2024-08-21 22:20:04 +08:00
leonkozlowski
4a7a04b35b patch: build 2024-08-21 10:13:02 -04:00
Kruglov Pavel
6db7b99543
Increase connectTimeoutMs IMDS connection timeout to 50ms to avoid failures in CI 2024-08-21 15:42:46 +02:00
Kruglov Pavel
a387807c84
Fix build 2024-08-21 15:14:51 +02:00
avogar
102f2cf3e9 Merge branch 'master' of github.com:ClickHouse/ClickHouse into fix-dynamic-tests 2024-08-21 12:47:17 +00:00
Robert Schulze
915daafd3a
Fix 01086_window_view_cleanup.sh 2024-08-21 10:45:48 +00:00
Zhigao Hong
e01a448bcc Fix invalid characters in replica_name 2024-08-21 15:35:33 +08:00
Dergousov
a0d29c812c fix: cosmetic 2024-08-21 02:47:23 +03:00
Dergousov
bb2b660096 fix: cosmetic 2024-08-21 02:11:08 +03:00
Dergousov
5740df58b9 feat: add test 2024-08-21 01:17:40 +03:00
Dergousov
1626589bb3 feat: add ripeMD160 support 2024-08-21 00:48:23 +03:00
avogar
539d04c90f Disable min_bytes_to_use_direct_io in some tests with Dynamic/JSON subcolumns because it's broken 2024-08-20 20:00:23 +00:00
Robert Schulze
434458cc83
Remove -n / --multiquery 2024-08-20 18:19:43 +00:00
Alexander Tokmakov
0b68517279 skip projections 2024-08-20 20:01:35 +02:00
Alexander Tokmakov
2ad50a5f3c
Update 01079_bad_alters_zookeeper_long.sh 2024-08-20 19:56:22 +02:00
Alexander Tokmakov
fe637452ec
Revert "Fix test 01079_bad_alters_zookeeper_long" 2024-08-20 19:54:12 +02:00
Alexander Tokmakov
aafa504bb5 Merge branch 'master' into check_merge_entries 2024-08-20 17:51:52 +02:00
leonkozlowski
e416a2b3d2 patch: fix reference to sorting key in primary key docs 2024-08-20 09:42:19 -04:00
jsc0218
1c53a02e4c Merge remote-tracking branch 'upstream/master' into fix-projection-merge 2024-08-20 13:19:23 +00:00
Yarik Briukhovetskyi
24eeaffa7a
init 2024-08-20 14:02:09 +02:00
Kseniia Sumarokova
0ccbb554b9
Update 02995_index_7.sh 2024-08-20 10:58:14 +02:00
PHO
4b05106cc4 Fix the upper bound of function fromModifiedJulianDay()
The upper bound was supposed to be 9999-12-31 but it was accidentally
set to 9999-01-01.
2024-08-20 17:49:26 +09:00
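A minimal sketch of the boundary this commit fixes, assuming the documented `toModifiedJulianDay`/`fromModifiedJulianDay` round trip:

```sql
-- before the fix, converting back any day after 9999-01-01 was rejected;
-- with the fix, the round trip succeeds up to the intended 9999-12-31 bound
SELECT toModifiedJulianDay('9999-12-31') AS mjd,
       fromModifiedJulianDay(mjd) AS roundtrip;
```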
jsc0218
902e7b6f29 ignore broken proj 2024-08-19 23:58:48 +00:00
Kruglov Pavel
00a27669df
Fix builds 2024-08-19 20:22:14 +02:00
Han Fei
3ff97813f4 add a test 2024-08-19 17:28:29 +00:00
jsc0218
527774d138 use new option name 2024-08-19 15:26:17 +00:00
Alexander Tokmakov
1049e36653
Update MergeList.cpp 2024-08-18 23:35:20 +02:00
Denis Hananein
a8a31ed137 Add unused 2024-08-17 21:14:25 +02:00
Denis Hananein
3adfea8653 Fix #68239 SAMPLE n 2024-08-17 06:06:04 +02:00
jsc0218
3388557306
Merge branch 'master' into fix-projection-merge 2024-08-16 21:02:51 -04:00
jsc0218
a0c5466e32 Merge remote-tracking branch 'upstream/master' into fix-projection-merge 2024-08-16 18:53:57 +00:00
Kruglov Pavel
5fe46af422
Update 02497_schema_inference_nulls.sql 2024-08-16 18:12:51 +02:00
Kruglov Pavel
6f7e4ce3aa
Merge branch 'master' into fix-nullable-schema-inference 2024-08-16 15:39:34 +02:00
avogar
370b6bdc7b Update tests 2024-08-16 13:38:30 +00:00
Alexander Tokmakov
3972991b1f
Update MergeList.cpp 2024-08-15 23:49:49 +02:00
taiyang-li
92a9b29b45 devirtualize format reader 2024-08-15 22:25:21 +08:00
divanik
f17b70e3f3 Resolve issues with settings 2024-08-15 14:06:00 +00:00
divanik
ef1f0e2aaf Fix typo in docs 2024-08-15 13:16:09 +00:00
divanik
53bff26f11 Try to fix submodule 2024-08-15 13:09:39 +00:00
divanik
0b58cbca38 Fix submodules 2024-08-15 13:02:25 +00:00
divanik
40b45d84ee Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-15 12:59:53 +00:00
divanik
6a96eb5a4e Merge branch 'divanik/add_local_and_azure_iceberg_support' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-15 12:58:49 +00:00
divanik
5b105e5082 Merge master 2024-08-15 12:58:44 +00:00
divanik
df07b07cb9 Add new setting 2024-08-15 12:18:02 +00:00
jsc0218
c840a12761
Merge branch 'master' into fix-projection-merge 2024-08-14 21:38:44 -04:00
Alexander Tokmakov
88fa8cb5bb
Update MergeList.cpp 2024-08-15 00:06:41 +02:00
Alexander Tokmakov
d88299b94d check that merge entries are valid 2024-08-14 23:46:40 +02:00
jsc0218
50a42cfee6 Merge remote-tracking branch 'upstream/master' into fix-projection-merge 2024-08-14 15:12:50 +00:00
Kruglov Pavel
ccb7ecb9a2
Update src/Formats/FormatSettings.h
Co-authored-by: Alexey Katsman <alex.katsman@clickhouse.com>
2024-08-14 15:13:57 +02:00
Kruglov Pavel
e2feaefcaf
Update src/Core/Settings.h
Co-authored-by: Alexey Katsman <alex.katsman@clickhouse.com>
2024-08-14 15:13:49 +02:00
divanik
0c17de136d Merge branches 'divanik/add_local_and_azure_iceberg_support' and 'divanik/add_local_and_azure_iceberg_support' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-14 11:36:31 +00:00
divanik
476819a56f Changed test to work in parallel 2024-08-14 11:35:37 +00:00
jsc0218
6bed26a527 tidy 2024-08-14 02:19:34 +00:00
avogar
70708fd5dc Update docs, make better 2024-08-13 19:19:02 +00:00
avogar
3cfb921bef Fix using schema_inference_make_columns_nullable=0 2024-08-13 18:41:53 +00:00
Daniil Ivanik
f9f41405cc
Merge branch 'master' into divanik/add_local_and_azure_iceberg_support 2024-08-13 18:24:02 +02:00
divanik
be92986eda Possibly fix broken tests and build 2024-08-13 14:48:54 +00:00
divanik
534ec1fa01 Correct compile error 2024-08-13 10:12:45 +00:00
divanik
25ce6df895 Return local table 2024-08-12 16:22:30 +00:00
divanik
9c25314a58 Fixed settings 2024-08-12 15:28:31 +00:00
divanik
61e616204e Docs correct typos 2024-08-12 14:21:42 +00:00
divanik
58dd01d8b6 Add docs 2024-08-12 13:45:00 +00:00
divanik
42d20f2a8d Remove table registration 2024-08-12 12:52:11 +00:00
divanik
411e8f7cfb Remove unnecessary changes 2024-08-12 12:44:22 +00:00
divanik
0810703d6b Roll out strange changes 2024-08-12 12:38:38 +00:00
divanik
bbf1008886 Fix test 2024-08-12 12:15:54 +00:00
divanik
52de06514a Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-12 09:14:39 +00:00
divanik
bc5d793382 Replace asynchronous buffer with synchronous 2024-08-12 09:13:24 +00:00
Igor Nikonov
cd456b149b Merge remote-tracking branch 'origin/master' into asan-invalid-shared-context-access 2024-08-11 18:53:36 +00:00
Igor Nikonov
f2b60c4f74 Merge remote-tracking branch 'origin/master' into asan-invalid-shared-context-access 2024-08-10 10:08:40 +00:00
jsc0218
a837df164c fix squash related and projection collection 2024-08-10 02:37:42 +00:00
Rodolphe Dugé de Bernonville
c3ab8266eb odbc: get http retry from server configuration 2024-08-09 14:05:18 +02:00
divanik
b67b7a62a4 Merge branches 'divanik/add_local_and_azure_iceberg_support' and 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-09 09:40:32 +00:00
divanik
29ce915d00 Try to fix bug 2024-08-09 09:37:16 +00:00
Raúl Marín
a53050d454 Add some comments 2024-08-08 17:01:46 +02:00
Raúl Marín
09627d9a09 Make 01038_dictionary_lifetime_min_zero_sec parallelizable 2024-08-08 17:01:38 +02:00
Raúl Marín
453b62b692 02455_one_row_from_csv_memory_usage is parallelizable 2024-08-08 16:53:07 +02:00
Raúl Marín
4b7080f2cd Parallelize 01600_detach_permanently 2024-08-08 16:49:33 +02:00
Raúl Marín
d3dffce440 Make 02247_written_bytes_quota parallel 2024-08-08 15:03:46 +02:00
Raúl Marín
de0d6f0368 Make 03002_part_log_rmt_fetch_* tests parallel 2024-08-08 15:00:13 +02:00
Raúl Marín
dbf743d58d Make 02722_database_filesystem parallelizable 2024-08-08 14:49:35 +02:00
Raúl Marín
4e97549f73 Make 02884_authentication_quota.sh more parallel 2024-08-08 14:22:11 +02:00
Raúl Marín
35eb4fa176 Schedule WV cleanup after any fire trigger 2024-08-08 12:03:14 +02:00
Raúl Marín
2992d51645 Merge remote-tracking branch 'blessed/master' into flaky_win_view 2024-08-08 12:00:39 +02:00
jsc0218
22dad244e7 fix whitespace 2024-08-08 01:50:49 +00:00
jsc0218
72c80c906b
Merge branch 'master' into fix-projection-merge 2024-08-07 21:10:54 -04:00
Raúl Marín
4eb682d4ef Merge remote-tracking branch 'blessed/master' into flaky_win_view 2024-08-07 20:01:29 +02:00
divanik
f5e993df2a Add tests 2024-08-07 16:46:33 +00:00
Raúl Marín
aca7e6734c Style 2024-08-07 16:24:56 +02:00
Raúl Marín
5f34171534 Fix window view missing blocks due to slow flush to view 2024-08-07 15:12:10 +02:00
Raúl Marín
6882e8ad79 Revert "Merge pull request #67130 from rschu1ze/unflake-win-view-tests2"
This reverts commit 48e61a295c, reversing
changes made to da24aa06fa.
2024-08-07 14:34:33 +02:00
divanik
299af052f8 Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-07 11:13:53 +00:00
jsc0218
6f60564777 fix build 2024-08-07 02:28:13 +00:00
divanik
c4e29466de Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-06 11:59:47 +00:00
jsc0218
c234e5215f Merge remote-tracking branch 'origin/master' into fix-projection-merge 2024-08-06 01:43:46 +00:00
divanik
9e85f89e95 Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_and_azure_iceberg_support 2024-08-05 16:15:28 +00:00
Igor Nikonov
76847d9b4c Fix(asan) : access destroyed shared context from handleCrash() 2024-08-02 22:43:05 +00:00
divanik
7e2e24c75d Unifying test changes 2024-07-31 10:29:12 +00:00
divanik
c59949d057 Add different iceberg tables 2024-07-22 09:50:47 +00:00
divanik
57181a5a48 Fix compilation bug 2024-07-19 14:47:57 +00:00
divanik
f0ef101bc5 Minor changes 2024-07-19 13:40:30 +00:00
divanik
07d03c0c67 Fix minor things 2024-07-19 13:30:46 +00:00
divanik
45e0f0350e Fix some stuff 2024-07-19 13:25:37 +00:00
divanik
e2b13411fd Merge branch 'master' of github.com:ClickHouse/ClickHouse into divanik/add_local_object_storage 2024-07-19 13:17:29 +00:00
divanik
27ab6aa8b5 Remove logs 2024-07-19 13:16:24 +00:00
divanik
436f6463c0 Add test 2024-07-19 13:06:30 +00:00
divanik
8fad286025 Remove asynchronous processing for local storage 2024-07-18 15:03:12 +00:00
divanik
84559ab31c Debug code 2024-07-18 10:06:06 +00:00
divanik
a760ad9446 Add logs 2024-07-17 12:34:11 +00:00
divanik
0bfe345a46 Debug code 2024-07-17 09:24:33 +00:00
divanik
d2eded16aa Debug commit 2024-07-15 10:53:22 +00:00
divanik
f54a4b073a Add local table function 2024-07-13 14:07:36 +00:00
divanik
6f03ff20d8 Add local storage 2024-07-13 14:06:24 +00:00
cangyin
5ecb5da648 Also rebuild for OPTIMIZE CLEANUP 2024-04-10 19:30:28 +00:00
cangyin
8c2a371eaa no readability-make-member-function-const 2024-04-08 20:24:54 +08:00
cangyin
603a52caa0 Add tests 2024-04-08 16:41:40 +08:00
cangyin
cc5456c649 Fix projection merge for Collapsing/Replacing/VersionedCollapsing MergeTree 2024-04-08 16:41:29 +08:00
267 changed files with 3451 additions and 1476 deletions


@@ -130,6 +130,7 @@ jobs:
    with:
      build_name: package_debug
      data: ${{ needs.RunConfig.outputs.data }}
      force: true
  BuilderBinDarwin:
    needs: [RunConfig, BuildDockers]
    if: ${{ !failure() && !cancelled() }}


@@ -34,7 +34,7 @@ curl https://clickhouse.com/ | sh
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 20
* [v24.9 Community Call](https://clickhouse.com/company/events/v24-9-community-release-call) - September 26
## Upcoming Events
@@ -44,13 +44,22 @@ The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey
* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25
* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/clickhouse-nc-meetup-group/events/302557230) - September 9
* [New York Meetup (Ramp)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9
* [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10
* [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12
Other upcoming meetups
* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27
* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27
* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5
* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
* **Recording available**: [**v24.4 Release Call**](https://www.youtube.com/watch?v=dtUqgcfOGmE) All the features of 24.4, one convenient video! Watch it now!
* **Recording available**: [**v24.8 LTS Release Call**](https://www.youtube.com/watch?v=AeLmp2jc51k) All the features of 24.8 LTS, one convenient video! Watch it now!
## Interested in joining ClickHouse and making it your full-time job?


@@ -18,7 +18,9 @@
#define Net_HTTPResponse_INCLUDED
#include <map>
#include <vector>
#include "Poco/Net/HTTPCookie.h"
#include "Poco/Net/HTTPMessage.h"
#include "Poco/Net/Net.h"
@@ -180,6 +182,8 @@ namespace Net
        /// May throw an exception in case of a malformed
        /// Set-Cookie header.

    void getHeaders(std::map<std::string, std::string> & headers) const;

    void write(std::ostream & ostr) const;
        /// Writes the HTTP response to the given
        /// output stream.


@@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
    }
}

void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
{
    headers.clear();
    for (const auto & it : *this)
    {
        headers.emplace(it.first, it.second);
    }
}

void HTTPResponse::write(std::ostream& ostr) const
{
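The `getHeaders` accessor above is the Poco-side plumbing for the `_headers` virtual column of the URL table engine merged in PR #68867 earlier in this list. A hedged usage sketch; the map-style access and the example URL are assumptions, not taken from the PR:

```sql
-- read a response header alongside the data fetched by the url() function
SELECT _headers['Content-Type'] AS content_type
FROM url('https://example.com/data.csv', 'CSVWithNames')
LIMIT 1;
```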


@@ -311,6 +311,14 @@ int SecureSocketImpl::sendBytes(const void* buffer, int length, int flags)
    while (mustRetry(rc, remaining_time));
    if (rc <= 0)
    {
        // At this stage we still can have last not yet received SSL message containing SSL error
        // so make a read to force SSL to process possible SSL error
        if (SSL_get_error(_pSSL, rc) == SSL_ERROR_SYSCALL && SocketImpl::lastError() == POCO_ECONNRESET)
        {
            char c = 0;
            SSL_read(_pSSL, &c, 1);
        }
        rc = handleError(rc);
        if (rc == 0) throw SSLConnectionUnexpectedlyClosedException();
    }


@@ -8,4 +8,7 @@ set (CMAKE_CXX_COMPILER_TARGET "x86_64-pc-freebsd11")
set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11")
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")
# dprintf is used in a patched version of replxx
add_compile_definitions(_WITH_DPRINTF)
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake

contrib/replxx vendored

@@ -1 +1 @@
Subproject commit 5d04501f93a4fb7f0bb8b73b8f614bc986f9e25b
Subproject commit 711c18e7f4d951255aa8b0851e5a55d5a5fb0ddb


@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.1.2684"
ARG VERSION="24.8.2.3"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""


@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.8.1.2684"
ARG VERSION="24.8.2.3"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""


@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.8.1.2684"
ARG VERSION="24.8.2.3"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off


@@ -112,3 +112,5 @@ wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
zipp==1.0.0
deltalake==0.16.0


@@ -40,6 +40,3 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git"
RUN git clone --recursive ${sqllogic_test_repo}
COPY run.sh /
CMD ["/bin/bash", "/run.sh"]


@@ -0,0 +1,71 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.5.41-stable (441d4a6ebe3) FIXME as compared to v24.5.4.49-stable (63b760955a0)
#### Improvement
* Backported in [#66768](https://github.com/ClickHouse/ClickHouse/issues/66768): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65350](https://github.com/ClickHouse/ClickHouse/issues/65350): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#65621](https://github.com/ClickHouse/ClickHouse/issues/65621): Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66884](https://github.com/ClickHouse/ClickHouse/issues/66884): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#65933](https://github.com/ClickHouse/ClickHouse/issues/65933): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#66301](https://github.com/ClickHouse/ClickHouse/issues/66301): Better handling of join conditions involving `IS NULL` checks (for example, `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL)) OR ((a IS NULL) AND (b IS NULL))` is rewritten to `ON a <=> b`; see the sketch after this changelog excerpt), and fix an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66328](https://github.com/ClickHouse/ClickHouse/issues/66328): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` to the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#66155](https://github.com/ClickHouse/ClickHouse/issues/66155): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66454](https://github.com/ClickHouse/ClickHouse/issues/66454): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66226](https://github.com/ClickHouse/ClickHouse/issues/66226): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66680](https://github.com/ClickHouse/ClickHouse/issues/66680): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66604](https://github.com/ClickHouse/ClickHouse/issues/66604): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66360](https://github.com/ClickHouse/ClickHouse/issues/66360): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#66972](https://github.com/ClickHouse/ClickHouse/issues/66972): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66691](https://github.com/ClickHouse/ClickHouse/issues/66691): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#66969](https://github.com/ClickHouse/ClickHouse/issues/66969): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66720](https://github.com/ClickHouse/ClickHouse/issues/66720): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66951](https://github.com/ClickHouse/ClickHouse/issues/66951): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66757](https://github.com/ClickHouse/ClickHouse/issues/66757): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL).` The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), with the disabled analyzer only. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66948](https://github.com/ClickHouse/ClickHouse/issues/66948): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67633](https://github.com/ClickHouse/ClickHouse/issues/67633): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67481](https://github.com/ClickHouse/ClickHouse/issues/67481): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67814](https://github.com/ClickHouse/ClickHouse/issues/67814): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67197](https://github.com/ClickHouse/ClickHouse/issues/67197): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67379](https://github.com/ClickHouse/ClickHouse/issues/67379): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67501](https://github.com/ClickHouse/ClickHouse/issues/67501): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67576](https://github.com/ClickHouse/ClickHouse/issues/67576): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67850](https://github.com/ClickHouse/ClickHouse/issues/67850): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream-like engines (Kafka, RabbitMQ, NATS) through the HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try to fix a postgres crash when the query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#66387](https://github.com/ClickHouse/ClickHouse/issues/66387): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66426](https://github.com/ClickHouse/ClickHouse/issues/66426): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66544](https://github.com/ClickHouse/ClickHouse/issues/66544): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66859](https://github.com/ClickHouse/ClickHouse/issues/66859): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66875](https://github.com/ClickHouse/ClickHouse/issues/66875): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67059](https://github.com/ClickHouse/ClickHouse/issues/67059): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66945](https://github.com/ClickHouse/ClickHouse/issues/66945): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67252](https://github.com/ClickHouse/ClickHouse/issues/67252): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67412](https://github.com/ClickHouse/ClickHouse/issues/67412): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).


@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.5.6.45-stable (bdca8604c29) FIXME as compared to v24.5.5.78-stable (0138248cb62)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67902](https://github.com/ClickHouse/ClickHouse/issues/67902): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68252](https://github.com/ClickHouse/ClickHouse/issues/68252): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68064](https://github.com/ClickHouse/ClickHouse/issues/68064): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68158](https://github.com/ClickHouse/ClickHouse/issues/68158): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68115](https://github.com/ClickHouse/ClickHouse/issues/68115): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67886](https://github.com/ClickHouse/ClickHouse/issues/67886): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68272](https://github.com/ClickHouse/ClickHouse/issues/68272): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67807](https://github.com/ClickHouse/ClickHouse/issues/67807): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67836](https://github.com/ClickHouse/ClickHouse/issues/67836): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67991](https://github.com/ClickHouse/ClickHouse/issues/67991): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68207](https://github.com/ClickHouse/ClickHouse/issues/68207): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68091](https://github.com/ClickHouse/ClickHouse/issues/68091): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68122](https://github.com/ClickHouse/ClickHouse/issues/68122): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68171](https://github.com/ClickHouse/ClickHouse/issues/68171): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68337](https://github.com/ClickHouse/ClickHouse/issues/68337): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68667](https://github.com/ClickHouse/ClickHouse/issues/68667): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update version after release. [#67862](https://github.com/ClickHouse/ClickHouse/pull/67862) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68077](https://github.com/ClickHouse/ClickHouse/issues/68077): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Backported in [#68756](https://github.com/ClickHouse/ClickHouse/issues/68756): To make a patch release possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).


@ -0,0 +1,83 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.3.38-stable (4e33c831589) FIXME as compared to v24.6.2.17-stable (5710a8b5c0c)
#### Improvement
* Backported in [#66770](https://github.com/ClickHouse/ClickHouse/issues/66770): Make allow_experimental_analyzer be controlled by the initiator for distributed queries. This ensures compatibility and correctness during operations in mixed version clusters. [#65777](https://github.com/ClickHouse/ClickHouse/pull/65777) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#66885](https://github.com/ClickHouse/ClickHouse/issues/66885): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66303](https://github.com/ClickHouse/ClickHouse/issues/66303): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fixes an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Backported in [#66330](https://github.com/ClickHouse/ClickHouse/issues/66330): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in the schema inference cache because they can change the resulting schema. This prevents incorrect schema inference results when these settings are changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#66157](https://github.com/ClickHouse/ClickHouse/issues/66157): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#66210](https://github.com/ClickHouse/ClickHouse/issues/66210): Disable the `merge-filters` optimization introduced in [#64760](https://github.com/ClickHouse/ClickHouse/issues/64760). It may cause an exception if optimization merges two filter expressions and does not apply a short-circuit evaluation. [#66126](https://github.com/ClickHouse/ClickHouse/pull/66126) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66456](https://github.com/ClickHouse/ClickHouse/issues/66456): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66228](https://github.com/ClickHouse/ClickHouse/issues/66228): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66183](https://github.com/ClickHouse/ClickHouse/issues/66183): Fix rare case with missing data in the result of distributed query, close [#61432](https://github.com/ClickHouse/ClickHouse/issues/61432). [#66174](https://github.com/ClickHouse/ClickHouse/pull/66174) ([vdimir](https://github.com/vdimir)).
* Backported in [#66271](https://github.com/ClickHouse/ClickHouse/issues/66271): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#66682](https://github.com/ClickHouse/ClickHouse/issues/66682): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66587](https://github.com/ClickHouse/ClickHouse/issues/66587): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
* Backported in [#66362](https://github.com/ClickHouse/ClickHouse/issues/66362): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#66613](https://github.com/ClickHouse/ClickHouse/issues/66613): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66693](https://github.com/ClickHouse/ClickHouse/issues/66693): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#66577](https://github.com/ClickHouse/ClickHouse/issues/66577): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66721](https://github.com/ClickHouse/ClickHouse/issues/66721): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#66670](https://github.com/ClickHouse/ClickHouse/issues/66670): Fix reading of uninitialized memory when hashing empty tuples. This closes [#66559](https://github.com/ClickHouse/ClickHouse/issues/66559). [#66562](https://github.com/ClickHouse/ClickHouse/pull/66562) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#66952](https://github.com/ClickHouse/ClickHouse/issues/66952): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66956](https://github.com/ClickHouse/ClickHouse/issues/66956): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66716](https://github.com/ClickHouse/ClickHouse/issues/66716): Fix removing named collections in local storage. [#66599](https://github.com/ClickHouse/ClickHouse/pull/66599) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#66759](https://github.com/ClickHouse/ClickHouse/issues/66759): Fix `Unknown identifier` and `Column is not under aggregate function` errors for queries with the expression `(column IS NULL)`. The bug was triggered by [#65088](https://github.com/ClickHouse/ClickHouse/issues/65088), only with the analyzer disabled. [#66654](https://github.com/ClickHouse/ClickHouse/pull/66654) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66751](https://github.com/ClickHouse/ClickHouse/issues/66751): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67635](https://github.com/ClickHouse/ClickHouse/issues/67635): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67482](https://github.com/ClickHouse/ClickHouse/issues/67482): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)).
* Backported in [#67816](https://github.com/ClickHouse/ClickHouse/issues/67816): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67199](https://github.com/ClickHouse/ClickHouse/issues/67199): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#67381](https://github.com/ClickHouse/ClickHouse/issues/67381): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#67244](https://github.com/ClickHouse/ClickHouse/issues/67244): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#67503](https://github.com/ClickHouse/ClickHouse/issues/67503): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67578](https://github.com/ClickHouse/ClickHouse/issues/67578): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67852](https://github.com/ClickHouse/ClickHouse/issues/67852): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67808](https://github.com/ClickHouse/ClickHouse/issues/67808): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67838](https://github.com/ClickHouse/ClickHouse/issues/67838): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#66599](https://github.com/ClickHouse/ClickHouse/issues/66599) to 24.6: Fix dropping named collection in local storage"'. [#66922](https://github.com/ClickHouse/ClickHouse/pull/66922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#66332](https://github.com/ClickHouse/ClickHouse/issues/66332): Do not raise a NOT_IMPLEMENTED error when getting s3 metrics with a multiple disk configuration. [#65403](https://github.com/ClickHouse/ClickHouse/pull/65403) ([Elena Torró](https://github.com/elenatorro)).
* Backported in [#66142](https://github.com/ClickHouse/ClickHouse/issues/66142): Fix flaky test_storage_s3_queue tests. [#66009](https://github.com/ClickHouse/ClickHouse/pull/66009) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66389](https://github.com/ClickHouse/ClickHouse/issues/66389): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
* Backported in [#66428](https://github.com/ClickHouse/ClickHouse/issues/66428): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#66546](https://github.com/ClickHouse/ClickHouse/issues/66546): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#66861](https://github.com/ClickHouse/ClickHouse/issues/66861): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
* Backported in [#66877](https://github.com/ClickHouse/ClickHouse/issues/66877): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
* Backported in [#67061](https://github.com/ClickHouse/ClickHouse/issues/67061): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
* Backported in [#66940](https://github.com/ClickHouse/ClickHouse/issues/66940): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67254](https://github.com/ClickHouse/ClickHouse/issues/67254): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
* Backported in [#67414](https://github.com/ClickHouse/ClickHouse/issues/67414): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).


@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.6.4.42-stable (c534bb4b4dd) FIXME as compared to v24.6.3.95-stable (8325c920d11)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68066](https://github.com/ClickHouse/ClickHouse/issues/68066): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68566](https://github.com/ClickHouse/ClickHouse/issues/68566): Fix indexHint function case found by fuzzer. [#66286](https://github.com/ClickHouse/ClickHouse/pull/66286) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68159](https://github.com/ClickHouse/ClickHouse/issues/68159): Fix cluster() for inter-server secret (preserve initial user as before). [#66364](https://github.com/ClickHouse/ClickHouse/pull/66364) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68116](https://github.com/ClickHouse/ClickHouse/issues/68116): Fix possible PARAMETER_OUT_OF_BOUND error during reading variant subcolumn. [#66659](https://github.com/ClickHouse/ClickHouse/pull/66659) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67887](https://github.com/ClickHouse/ClickHouse/issues/67887): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68611](https://github.com/ClickHouse/ClickHouse/issues/68611): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68275](https://github.com/ClickHouse/ClickHouse/issues/68275): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#67993](https://github.com/ClickHouse/ClickHouse/issues/67993): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68208](https://github.com/ClickHouse/ClickHouse/issues/68208): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68093](https://github.com/ClickHouse/ClickHouse/issues/68093): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68124](https://github.com/ClickHouse/ClickHouse/issues/68124): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68221](https://github.com/ClickHouse/ClickHouse/issues/68221): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68173](https://github.com/ClickHouse/ClickHouse/issues/68173): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68339](https://github.com/ClickHouse/ClickHouse/issues/68339): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68396](https://github.com/ClickHouse/ClickHouse/issues/68396): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68668](https://github.com/ClickHouse/ClickHouse/issues/68668): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update version after release. [#67909](https://github.com/ClickHouse/ClickHouse/pull/67909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68079](https://github.com/ClickHouse/ClickHouse/issues/68079): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Backported in [#68758](https://github.com/ClickHouse/ClickHouse/issues/68758): To make a patch release possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).


@ -0,0 +1,55 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.7.3.47-stable (2e50fe27a14) FIXME as compared to v24.7.2.13-stable (6e41f601b2f)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#67969](https://github.com/ClickHouse/ClickHouse/issues/67969): Fixed reading of subcolumns after `ALTER ADD COLUMN` query. [#66243](https://github.com/ClickHouse/ClickHouse/pull/66243) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#67637](https://github.com/ClickHouse/ClickHouse/issues/67637): Fix for occasional deadlock in Context::getDDLWorker. [#66843](https://github.com/ClickHouse/ClickHouse/pull/66843) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#67820](https://github.com/ClickHouse/ClickHouse/issues/67820): Fix possible deadlock on query cancel with parallel replicas. [#66905](https://github.com/ClickHouse/ClickHouse/pull/66905) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67818](https://github.com/ClickHouse/ClickHouse/issues/67818): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67766](https://github.com/ClickHouse/ClickHouse/issues/67766): Fix crash of `uniq` and `uniqTheta` with `tuple()` argument. Closes [#67303](https://github.com/ClickHouse/ClickHouse/issues/67303). [#67306](https://github.com/ClickHouse/ClickHouse/pull/67306) ([flynn](https://github.com/ucasfl)).
* Backported in [#67881](https://github.com/ClickHouse/ClickHouse/issues/67881): Correctly parse file name/URI containing `::` if it's not an archive. [#67433](https://github.com/ClickHouse/ClickHouse/pull/67433) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#67854](https://github.com/ClickHouse/ClickHouse/issues/67854): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL can be placed into the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#67713](https://github.com/ClickHouse/ClickHouse/issues/67713): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#67840](https://github.com/ClickHouse/ClickHouse/issues/67840): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#67995](https://github.com/ClickHouse/ClickHouse/issues/67995): Validate experimental/suspicious data types in ALTER ADD/MODIFY COLUMN. [#67911](https://github.com/ClickHouse/ClickHouse/pull/67911) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#67518](https://github.com/ClickHouse/ClickHouse/issues/67518): Split slow test 03036_dynamic_read_subcolumns. [#66954](https://github.com/ClickHouse/ClickHouse/pull/66954) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67516](https://github.com/ClickHouse/ClickHouse/issues/67516): Split 01508_partition_pruning_long. [#66983](https://github.com/ClickHouse/ClickHouse/pull/66983) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67529](https://github.com/ClickHouse/ClickHouse/issues/67529): Reduce max time of 00763_long_lock_buffer_alter_destination_table. [#67185](https://github.com/ClickHouse/ClickHouse/pull/67185) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to sql. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#67643](https://github.com/ClickHouse/ClickHouse/issues/67643): [Green CI] Fix potentially flaky test_mask_sensitive_info integration test. [#67506](https://github.com/ClickHouse/ClickHouse/pull/67506) ([Alexey Katsman](https://github.com/alexkats)).
* Backported in [#67609](https://github.com/ClickHouse/ClickHouse/issues/67609): Fix test_zookeeper_config_load_balancing after adding the xdist worker name to the instance. [#67590](https://github.com/ClickHouse/ClickHouse/pull/67590) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67871](https://github.com/ClickHouse/ClickHouse/issues/67871): Fix 02434_cancel_insert_when_client_dies. [#67600](https://github.com/ClickHouse/ClickHouse/pull/67600) ([vdimir](https://github.com/vdimir)).
* Backported in [#67704](https://github.com/ClickHouse/ClickHouse/issues/67704): Fix 02910_bad_logs_level_in_local in fast tests. [#67603](https://github.com/ClickHouse/ClickHouse/pull/67603) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#67689](https://github.com/ClickHouse/ClickHouse/issues/67689): Fix 01605_adaptive_granularity_block_borders. [#67605](https://github.com/ClickHouse/ClickHouse/pull/67605) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67827](https://github.com/ClickHouse/ClickHouse/issues/67827): Try fix 03143_asof_join_ddb_long. [#67620](https://github.com/ClickHouse/ClickHouse/pull/67620) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67892](https://github.com/ClickHouse/ClickHouse/issues/67892): Revert "Merge pull request [#66510](https://github.com/ClickHouse/ClickHouse/issues/66510) from canhld94/fix_trivial_count_non_deterministic_func". [#67800](https://github.com/ClickHouse/ClickHouse/pull/67800) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).


@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.7.4.51-stable (70fe2f6fa52) FIXME as compared to v24.7.3.42-stable (63730bc4293)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68232](https://github.com/ClickHouse/ClickHouse/issues/68232): Fixed `Not-ready Set` in some system tables when filtering using subqueries. [#66018](https://github.com/ClickHouse/ClickHouse/pull/66018) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#68068](https://github.com/ClickHouse/ClickHouse/issues/68068): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)).
* Backported in [#68613](https://github.com/ClickHouse/ClickHouse/issues/68613): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#68278](https://github.com/ClickHouse/ClickHouse/issues/68278): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68040](https://github.com/ClickHouse/ClickHouse/issues/68040): Fix creation of view with recursive CTE. [#67587](https://github.com/ClickHouse/ClickHouse/pull/67587) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#68038](https://github.com/ClickHouse/ClickHouse/issues/68038): Fix crash on `percent_rank`. `percent_rank`'s default frame type is changed to `range unbounded preceding and unbounded following`. `IWindowFunction`'s default window frame is now taken into account, so window functions without a window frame definition in SQL can be placed into the proper `WindowTransformer`s. [#67661](https://github.com/ClickHouse/ClickHouse/pull/67661) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#68224](https://github.com/ClickHouse/ClickHouse/issues/68224): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Backported in [#68095](https://github.com/ClickHouse/ClickHouse/issues/68095): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#68126](https://github.com/ClickHouse/ClickHouse/issues/68126): Fixed skipping of untouched parts in mutations with the new analyzer. Previously, with the analyzer enabled, data in a part could be rewritten by a mutation even if the mutation did not affect that part according to its predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#68223](https://github.com/ClickHouse/ClickHouse/issues/68223): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#68175](https://github.com/ClickHouse/ClickHouse/issues/68175): Removes an incorrect optimization to remove sorting in subqueries that use `OFFSET`. Fixes [#67906](https://github.com/ClickHouse/ClickHouse/issues/67906). [#68099](https://github.com/ClickHouse/ClickHouse/pull/68099) ([Graham Campbell](https://github.com/GrahamCampbell)).
* Backported in [#68341](https://github.com/ClickHouse/ClickHouse/issues/68341): Try to fix a postgres crash when a query is cancelled. [#68288](https://github.com/ClickHouse/ClickHouse/pull/68288) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#68398](https://github.com/ClickHouse/ClickHouse/issues/68398): Fix missing sync replica mode in query `SYSTEM SYNC REPLICA`. [#68326](https://github.com/ClickHouse/ClickHouse/pull/68326) ([Duc Canh Le](https://github.com/canhld94)).
* Backported in [#68669](https://github.com/ClickHouse/ClickHouse/issues/68669): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#67803](https://github.com/ClickHouse/ClickHouse/issues/67803): Disable some Dynamic tests under sanitizers, rewrite 03202_dynamic_null_map_subcolumn to sql. [#67359](https://github.com/ClickHouse/ClickHouse/pull/67359) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68081](https://github.com/ClickHouse/ClickHouse/issues/68081): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)).
* Update version after release. [#68044](https://github.com/ClickHouse/ClickHouse/pull/68044) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Backported in [#68269](https://github.com/ClickHouse/ClickHouse/issues/68269): [Green CI] Fix test 01903_correct_block_size_prediction_with_default. [#68203](https://github.com/ClickHouse/ClickHouse/pull/68203) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#68432](https://github.com/ClickHouse/ClickHouse/issues/68432): tests: make 01600_parts_states_metrics_long better. [#68265](https://github.com/ClickHouse/ClickHouse/pull/68265) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68538](https://github.com/ClickHouse/ClickHouse/issues/68538): CI: Native build for package_aarch64. [#68457](https://github.com/ClickHouse/ClickHouse/pull/68457) ([Max K.](https://github.com/maxknv)).
* Backported in [#68555](https://github.com/ClickHouse/ClickHouse/issues/68555): CI: Minor release workflow fix. [#68536](https://github.com/ClickHouse/ClickHouse/pull/68536) ([Max K.](https://github.com/maxknv)).
* Backported in [#68760](https://github.com/ClickHouse/ClickHouse/issues/68760): To make a patch release possible from every commit on a release branch, the package_debug build is required and must not be skipped. [#68750](https://github.com/ClickHouse/ClickHouse/pull/68750) ([Max K.](https://github.com/maxknv)).


@ -0,0 +1,12 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.2.3-lts (b54f79ed323) FIXME as compared to v24.8.1.2684-lts (161c62fd295)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#68670](https://github.com/ClickHouse/ClickHouse/issues/68670): Fix `LOGICAL_ERROR`s when functions `sipHash64Keyed`, `sipHash128Keyed`, or `sipHash128ReferenceKeyed` are applied to empty arrays or tuples. [#68630](https://github.com/ClickHouse/ClickHouse/pull/68630) ([Robert Schulze](https://github.com/rschu1ze)).


@ -6,28 +6,34 @@ sidebar_label: Iceberg
# Iceberg Table Engine
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, in Azure, and in local storage.
## Create Table
Note that the Iceberg table must already exist in S3, this command does not take DDL parameters to create a new table.
Note that the Iceberg table must already exist in the storage; this command does not take DDL parameters to create a new table.
``` sql
CREATE TABLE iceberg_table
ENGINE = Iceberg(url [, aws_access_key_id, aws_secret_access_key])

CREATE TABLE iceberg_table_s3
ENGINE = IcebergS3(url [, NOSIGN | access_key_id, secret_access_key [, session_token]] [, format] [, compression])

CREATE TABLE iceberg_table_azure
ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath [, account_name, account_key, format, compression])

CREATE TABLE iceberg_table_local
ENGINE = IcebergLocal(path_to_table [, format] [, compression_method])
```
**Engine parameters**
**Engine arguments**
- `url` — URL with the path to an existing Iceberg table.
- `aws_access_key_id`, `aws_secret_access_key` — Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. The parameter is optional. If credentials are not specified, they are taken from the configuration file.
The descriptions of the arguments match the descriptions of the arguments of the `S3`, `AzureBlobStorage`, and `File` engines, respectively.
`format` stands for the format of the data files in the Iceberg table.
Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
**Example**
```sql
CREATE TABLE iceberg_table ENGINE=Iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
CREATE TABLE iceberg_table ENGINE=IcebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
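The Azure and local variants follow the same pattern; a minimal sketch (the storage account URL, container, and paths below are illustrative placeholders, not values from this page):

```sql
CREATE TABLE iceberg_table_azure
ENGINE=IcebergAzure('http://localhost:10000/devstoreaccount1', 'test-container', 'test_table/')

CREATE TABLE iceberg_table_local
ENGINE=IcebergLocal('/var/lib/clickhouse/user_files/iceberg/test_table/')
```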
Using named collections:
@ -45,9 +51,15 @@ Using named collections:
```
```sql
CREATE TABLE iceberg_table ENGINE=Iceberg(iceberg_conf, filename = 'test_table')
CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table')
```
**Aliases**
The table engine `Iceberg` is now an alias for `IcebergS3`.
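DDL that still uses the old name keeps working; for example, the statement below is equivalent to the `IcebergS3` example above:

```sql
-- `Iceberg` resolves to `IcebergS3`
CREATE TABLE iceberg_table ENGINE=Iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```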
## See also
- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)


@ -80,7 +80,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.
Specifying a sorting key (using the `ORDER BY` clause) implicitly specifies a primary key.
It is usually not necessary to specify the primary key in addition to the primary key.
It is usually not necessary to specify the primary key in addition to the sorting key.
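A minimal sketch of the common case, in which `ORDER BY` alone defines both keys (the table and column names are illustrative):

```sql
CREATE TABLE events
(
    event_date Date,
    user_id    UInt64,
    payload    String
)
ENGINE = MergeTree
-- ORDER BY sets the sorting key; the primary key implicitly defaults to the same expression
ORDER BY (event_date, user_id);
```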
#### SAMPLE BY


@ -109,6 +109,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` — HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
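A minimal sketch of reading the new virtual column (the URL and schema below are illustrative placeholders):

```sql
CREATE TABLE raw_csv (col1 String)
ENGINE = URL('https://example.com/data.csv', CSV);

-- Map subscripting retrieves a single response header
SELECT _headers['Content-Type'] AS content_type
FROM raw_csv
LIMIT 1;
```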
## Storage Settings {#storage-settings}


@ -1389,7 +1389,7 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul
#### schema_inference_make_columns_nullable
Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will be `Nullable` only if `input_format_null_as_default` is disabled and the column contains `NULL` in a sample that is parsed during schema inference.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference or the file metadata contains information about column nullability.
Enabled by default.
@ -1412,15 +1412,13 @@ DESC format(JSONEachRow, $$
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
```sql
SET schema_inference_make_columns_nullable = 0;
SET input_format_null_as_default = 0;
SET schema_inference_make_columns_nullable = 'auto';
DESC format(JSONEachRow, $$
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
$$)
```
```response
┌─name────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ id │ Int64 │ │ │ │ │ │
│ age │ Int64 │ │ │ │ │ │
@ -1432,7 +1430,6 @@ DESC format(JSONEachRow, $$
```sql
SET schema_inference_make_columns_nullable = 0;
SET input_format_null_as_default = 1;
DESC format(JSONEachRow, $$
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}

View File

@ -171,8 +171,8 @@ If the `schema_inference_hints` is not formatted properly, or if there is a typo
## schema_inference_make_columns_nullable {#schema_inference_make_columns_nullable}
Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
If the setting is enabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference.
Controls making inferred types `Nullable` in schema inference.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference or the file metadata contains information about column nullability.
Default value: `true`.
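A minimal sketch of the `auto` mode (the sample data is illustrative): only columns that actually contain `NULL` in the parsed sample are inferred as `Nullable`:

```sql
SET schema_inference_make_columns_nullable = 'auto';

-- id never contains NULL in the sample, so it is inferred as Int64;
-- status contains NULL, so it is inferred as Nullable(String).
DESC format(JSONEachRow, $$
{"id" : 1, "status" : null}
{"id" : 2, "status" : "active"}
$$)
```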

View File

@ -70,7 +70,7 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json
└────────────────────────────────────────────────┘
```
CAST from named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
## Reading JSON paths as subcolumns

View File

@ -4287,7 +4287,7 @@ Result:
## fromModifiedJulianDay
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
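For example, the documented bounds can be checked directly:

```sql
SELECT fromModifiedJulianDay(-678941) AS lower_bound, fromModifiedJulianDay(2973483) AS upper_bound;
-- lower_bound: '0000-01-01', upper_bound: '9999-12-31'
```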
**Syntax**

View File

@ -6,7 +6,7 @@ title: "Functions for Working with Geohash"
## Geohash
[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocode system which subdivides the Earth's surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer is the geohash string, the more precise is the geographic location.
[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocode system which subdivides the Earth's surface into buckets of grid shape and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string is, the more precise the geographic location will be.
If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
@ -14,26 +14,37 @@ If you need to manually convert geographic coordinates to geohash strings, you c
Encodes latitude and longitude as a [geohash](#geohash)-string.
**Syntax**
``` sql
geohashEncode(longitude, latitude, [precision])
```
**Input values**
- longitude - longitude part of the coordinate you want to encode. Floating in range`[-180°, 180°]`
- latitude - latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`
- precision - Optional, length of the resulting encoded string, defaults to `12`. Integer in range `[1, 12]`. Any value less than `1` or greater than `12` is silently converted to `12`.
- `longitude` — Longitude part of the coordinate you want to encode. Floating in range `[-180°, 180°]`. [Float](../../data-types/float.md).
- `latitude` — Latitude part of the coordinate you want to encode. Floating in range `[-90°, 90°]`. [Float](../../data-types/float.md).
- `precision` (optional) — Length of the resulting encoded string. Defaults to `12`. Integer in the range `[1, 12]`. [Int8](../../data-types/int-uint.md).
:::note
- All coordinate parameters must be of the same type: either `Float32` or `Float64`.
- For the `precision` parameter, any value less than `1` or greater than `12` is silently converted to `12`.
:::
**Returned values**
- alphanumeric `String` of encoded coordinate (modified version of the base32-encoding alphabet is used).
- Alphanumeric string of the encoded coordinate (a modified version of the base32-encoding alphabet is used). [String](../../data-types/string.md).
**Example**
Query:
``` sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res;
```
Result:
``` text
┌─res──────────┐
│ ezs42d000000 │
@ -44,13 +55,19 @@ SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS res;
Decodes any [geohash](#geohash)-encoded string into longitude and latitude.
**Syntax**
```sql
geohashDecode(hash_str)
```
**Input values**
- encoded string - geohash-encoded string.
- `hash_str` — Geohash-encoded string.
**Returned values**
- (longitude, latitude) - 2-tuple of `Float64` values of longitude and latitude.
- Tuple `(longitude, latitude)` of `Float64` values. [Tuple](../../data-types/tuple.md)([Float64](../../data-types/float.md)).
**Example**
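Query (a minimal sketch; the decoded coordinates are approximate):

```sql
SELECT geohashDecode('ezs42') AS res;
-- res is approximately (-5.603, 42.605)
```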

View File

@ -688,6 +688,40 @@ SELECT kostikConsistentHash(16045690984833335023, 2);
└───────────────────────────────────────────────┘
```
## ripeMD160
Produces a [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash value.
**Syntax**
```sql
ripeMD160(input)
```
**Parameters**
- `input`: Input string. [String](../data-types/string.md)
**Returned value**
- A [UInt256](../data-types/int-uint.md) hash value where the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are zero-padded.
**Example**
Use the [hex](../functions/encoding-functions.md/#hex) function to represent the result as a hex-encoded string.
Query:
```sql
SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
```
```response
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
└───────────────────────────────────────────────────────────────┘
```
## murmurHash2_32, murmurHash2_64
Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.

View File

@ -6,35 +6,37 @@ sidebar_label: iceberg
# iceberg Table Function
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables stored in Amazon S3, Azure Blob Storage or locally.
## Syntax
``` sql
iceberg(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure])
icebergS3(url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,compression_method])
icebergS3(named_collection[, option=value [,..]])
icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzure(named_collection[, option=value [,..]])
icebergLocal(path_to_table, [,format] [,compression_method])
icebergLocal(named_collection[, option=value [,..]])
```
## Arguments
- `url` — Bucket url with the path to an existing Iceberg table in S3.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. These parameters are optional. If credentials are not specified, they are used from the ClickHouse configuration. For more information see [Using S3 for Data Storage](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3).
- `format` — The [format](/docs/en/interfaces/formats.md/#formats) of the file. By default `Parquet` is used.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
Engine parameters can be specified using [Named Collections](/docs/en/operations/named-collections.md).
The descriptions of the arguments coincide with the descriptions of the arguments in the table functions `s3`, `azureBlobStorage` and `file`, respectively.
`format` stands for the format of data files in the Iceberg table.
**Returned value**
A table with the specified structure for reading data in the specified Iceberg table in S3.
A table with the specified structure for reading data in the specified Iceberg table.
**Example**
```sql
SELECT * FROM iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
:::important
ClickHouse currently supports reading v1 (v2 support is coming soon!) of the Iceberg format via the `iceberg` table function and `Iceberg` table engine.
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure` and `icebergLocal` table functions and the `IcebergS3`, `IcebergAzure` and `IcebergLocal` table engines.
:::
## Defining a named collection
@ -56,10 +58,14 @@ Here is an example of configuring a named collection for storing the URL and cre
```
```sql
SELECT * FROM iceberg(iceberg_conf, filename = 'test_table')
DESCRIBE iceberg(iceberg_conf, filename = 'test_table')
SELECT * FROM icebergS3(iceberg_conf, filename = 'test_table')
DESCRIBE icebergS3(iceberg_conf, filename = 'test_table')
```
**Aliases**
Table function `iceberg` is now an alias for `icebergS3`.
**See Also**
- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)

View File

@ -54,6 +54,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` — HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
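For example, a minimal sketch with the `url` table function (the endpoint is illustrative):

```sql
SELECT _headers['Content-Type'] AS content_type
FROM url('https://example.com/data.json', JSONEachRow, 'data String')
LIMIT 1;
```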
## Hive-style partitioning {#hive-style-partitioning}

View File

@ -22,18 +22,26 @@ grep -q sse4_2 /proc/cpuinfo && echo "SSE 4.2 supported" || echo "SSE 4.2 not su
### From deb packages {#install-from-deb-packages}
Yandex recommends using the official precompiled `deb` packages for Debian or Ubuntu. To install the packages, run:
It is recommended to use the official precompiled `deb` packages for Debian or Ubuntu. To install the packages, run:
``` bash
sudo apt-get install -y apt-transport-https ca-certificates dirmngr
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg
curl -fsSL 'https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key' | sudo gpg --dearmor -o /usr/share/keyrings/clickhouse-keyring.gpg
echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
echo "deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main" | sudo tee \
/etc/apt/sources.list.d/clickhouse.list
sudo apt-get update
```
#### Installing ClickHouse server and client
```bash
sudo apt-get install -y clickhouse-server clickhouse-client
```
#### Starting ClickHouse server
```bash
sudo service clickhouse-server start
clickhouse-client # or "clickhouse-client --password" if you've set up a password.
```
@ -55,7 +63,7 @@ clickhouse-client # or "clickhouse-client --password" if you've set up a passwor
:::
### From rpm packages {#from-rpm-packages}
The ClickHouse team at Yandex recommends using the official precompiled `rpm` packages for CentOS, RedHat and all other rpm-based Linux distributions.
The ClickHouse team recommends using the official precompiled `rpm` packages for CentOS, RedHat and all other rpm-based Linux distributions.
#### Installing the official repository
@ -102,7 +110,7 @@ sudo yum install clickhouse-server clickhouse-client
### From tgz archives {#from-tgz-archives}
The ClickHouse team at Yandex recommends using precompiled binaries from `tgz` archives for all distributions where `deb` and `rpm` packages cannot be installed.
The ClickHouse team recommends using precompiled binaries from `tgz` archives for all distributions where `deb` and `rpm` packages cannot be installed.
The desired version of the archives can be downloaded manually with `curl` or `wget` from the repository https://packages.clickhouse.com/tgz/.
After that, the archives need to be unpacked and the installation scripts used. Example of installing the latest version:

View File

@ -124,6 +124,40 @@ SELECT hex(sipHash128('foo', '\x01', 3));
└──────────────────────────────────┘
```
## ripeMD160
Produces a [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash of a string.
**Syntax**
```sql
ripeMD160(input)
```
**Arguments**
- `input`: Input string. [String](../data-types/string.md)
**Returned value**
- A [UInt256](../data-types/int-uint.md) value where the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are zero-padded.
**Example**
Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.
Query:
```sql
SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
```
Result:
```response
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
└───────────────────────────────────────────────────────────────┘
```
## cityHash64 {#cityhash64}
Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value.

View File

@ -1157,7 +1157,7 @@ SELECT toModifiedJulianDayOrNull('2020-01-01');
## fromModifiedJulianDay {#frommodifiedjulianday}
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
**Syntax**

View File

@ -978,6 +978,7 @@ try
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
* At this moment, no one could own shared part of Context.
*/
global_context->resetSharedContext();
global_context.reset();
shared_context.reset();
LOG_DEBUG(log, "Destroyed global context.");

View File

@ -120,7 +120,7 @@ void RoleCache::collectEnabledRoles(EnabledRoles & enabled_roles, SubscriptionsO
SubscriptionsOnRoles new_subscriptions_on_roles;
new_subscriptions_on_roles.reserve(subscriptions_on_roles.size());
auto get_role_function = [this, &subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, subscriptions_on_roles); };
auto get_role_function = [this, &new_subscriptions_on_roles](const UUID & id) TSA_NO_THREAD_SAFETY_ANALYSIS { return getRole(id, new_subscriptions_on_roles); };
for (const auto & current_role : enabled_roles.params.current_roles)
collectRoles(*new_info, skip_ids, get_role_function, current_role, true, false);

View File

@ -692,7 +692,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
result_column_node = it->second;
}
/// Check if it's a dynamic subcolumn
else
else if (table_expression_data.supports_subcolumns)
{
auto [column_name, dynamic_subcolumn_name] = Nested::splitName(identifier_full_name);
auto jt = table_expression_data.column_name_to_column_node.find(column_name);

View File

@ -4379,7 +4379,10 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
auto get_column_options = GetColumnsOptions(GetColumnsOptions::All).withExtendedObjects().withVirtuals();
if (storage_snapshot->storage.supportsSubcolumns())
{
get_column_options.withSubcolumns();
table_expression_data.supports_subcolumns = true;
}
auto column_names_and_types = storage_snapshot->getColumns(get_column_options);
table_expression_data.column_names_and_types = NamesAndTypes(column_names_and_types.begin(), column_names_and_types.end());

View File

@ -36,6 +36,7 @@ struct AnalysisTableExpressionData
std::string database_name;
std::string table_name;
bool should_qualify_columns = true;
bool supports_subcolumns = false;
NamesAndTypes column_names_and_types;
ColumnNameToColumnNodeMap column_name_to_column_node;
std::unordered_set<std::string> subcolumn_names; /// Subset columns that are subcolumns of other columns

View File

@ -100,6 +100,7 @@ protected:
auto buf = BuilderRWBufferFromHTTP(getPingURI())
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
return checkString(PING_OK_ANSWER, *buf);
@ -206,6 +207,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
bool res = false;
@ -232,6 +234,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);
std::string character;

View File

@ -111,6 +111,7 @@ add_headers_and_sources(dbms Storages/ObjectStorage)
add_headers_and_sources(dbms Storages/ObjectStorage/Azure)
add_headers_and_sources(dbms Storages/ObjectStorage/S3)
add_headers_and_sources(dbms Storages/ObjectStorage/HDFS)
add_headers_and_sources(dbms Storages/ObjectStorage/Local)
add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes)
add_headers_and_sources(dbms Common/NamedCollections)

View File

@ -145,6 +145,9 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
/// work we need to pass host name separately. It will be send into TLS Hello packet to let
/// the server know which host we want to talk with (single IP can process requests for multiple hosts using SNI).
static_cast<Poco::Net::SecureStreamSocket*>(socket.get())->setPeerHostName(host);
/// we want to postpone SSL handshake until first read or write operation
/// so any errors during negotiation would be properly processed
static_cast<Poco::Net::SecureStreamSocket*>(socket.get())->setLazyHandshake(true);
#else
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "tcp_secure protocol is disabled because poco library was built without NetSSL support.");
#endif

View File

@ -299,13 +299,14 @@ ReplxxLineReader::ReplxxLineReader(
Patterns delimiters_,
const char word_break_characters_[],
replxx::Replxx::highlighter_callback_t highlighter_,
[[ maybe_unused ]] std::istream & input_stream_,
[[ maybe_unused ]] std::ostream & output_stream_,
[[ maybe_unused ]] int in_fd_,
[[ maybe_unused ]] int out_fd_,
[[ maybe_unused ]] int err_fd_
std::istream & input_stream_,
std::ostream & output_stream_,
int in_fd_,
int out_fd_,
int err_fd_
)
: LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_), input_stream_, output_stream_, in_fd_)
, rx(input_stream_, output_stream_, in_fd_, out_fd_, err_fd_)
, highlighter(std::move(highlighter_))
, word_break_characters(word_break_characters_)
, editor(getEditor())
@ -516,7 +517,7 @@ void ReplxxLineReader::addToHistory(const String & line)
rx.history_add(line);
// flush changes to the disk
if (!rx.history_save(history_file_path))
if (history_file_fd >= 0 && !rx.history_save(history_file_path))
rx.print("Saving history failed: %s\n", errnoToString().c_str());
if (history_file_fd >= 0 && locked && 0 != flock(history_file_fd, LOCK_UN))

View File

@ -1181,13 +1181,14 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source
/// Check if the number of all dynamic types exceeds the limit.
if (!canAddNewVariants(0, all_variants.size()))
{
/// Create list of variants with their sizes and sort it.
std::vector<std::pair<size_t, DataTypePtr>> variants_with_sizes;
/// Create a list of variants with their sizes and names and then sort it.
std::vector<std::tuple<size_t, String, DataTypePtr>> variants_with_sizes;
variants_with_sizes.reserve(all_variants.size());
for (const auto & variant : all_variants)
{
if (variant->getName() != getSharedVariantTypeName())
variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant);
auto variant_name = variant->getName();
if (variant_name != getSharedVariantTypeName())
variants_with_sizes.emplace_back(total_sizes[variant_name], variant_name, variant);
}
std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater());
@ -1196,14 +1197,14 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source
result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant.
/// Add shared variant.
result_variants.push_back(getSharedVariantDataType());
for (const auto & [size, variant] : variants_with_sizes)
for (const auto & [size, variant_name, variant_type] : variants_with_sizes)
{
/// Add variant to the resulting variants list until we reach max_dynamic_types.
if (canAddNewVariant(result_variants.size()))
result_variants.push_back(variant);
result_variants.push_back(variant_type);
/// Add all remaining variants into shared_variants_statistics until we reach its max size.
else if (new_statistics.shared_variants_statistics.size() < Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE)
new_statistics.shared_variants_statistics[variant->getName()] = size;
new_statistics.shared_variants_statistics[variant_name] = size;
else
break;
}

View File

@ -127,7 +127,7 @@ std::string ColumnObject::getName() const
{
WriteBufferFromOwnString ss;
ss << "Object(";
ss << "max_dynamic_paths=" << max_dynamic_paths;
ss << "max_dynamic_paths=" << global_max_dynamic_paths;
ss << ", max_dynamic_types=" << max_dynamic_types;
std::vector<String> sorted_typed_paths;
sorted_typed_paths.reserve(typed_paths.size());
@ -1045,9 +1045,9 @@ void ColumnObject::forEachSubcolumnRecursively(DB::IColumn::RecursiveMutableColu
bool ColumnObject::structureEquals(const IColumn & rhs) const
{
/// 2 Object columns have equal structure if they have the same typed paths and max_dynamic_paths/max_dynamic_types.
/// 2 Object columns have equal structure if they have the same typed paths and global_max_dynamic_paths/max_dynamic_types.
const auto * rhs_object = typeid_cast<const ColumnObject *>(&rhs);
if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || max_dynamic_paths != rhs_object->max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types)
if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() || global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types)
return false;
for (const auto & [path, column] : typed_paths)

View File

@ -953,7 +953,7 @@ ColumnPtr ColumnVariant::index(const IColumn & indexes, size_t limit) const
{
/// If we have only NULLs, index will take no effect, just return resized column.
if (hasOnlyNulls())
return cloneResized(limit);
return cloneResized(limit == 0 ? indexes.size(): limit);
/// Optimization when we have only one non empty variant and no NULLs.
/// In this case local_discriminators column is filled with identical values and offsets column
@ -1009,8 +1009,16 @@ ColumnPtr ColumnVariant::indexImpl(const PaddedPODArray<Type> & indexes, size_t
new_variants.reserve(num_variants);
for (size_t i = 0; i != num_variants; ++i)
{
size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
/// Check if no values from this variant were selected.
if (nested_perms[i].empty())
{
new_variants.emplace_back(variants[i]->cloneEmpty());
}
else
{
size_t nested_limit = nested_perms[i].size() == variants[i]->size() ? 0 : nested_perms[i].size();
new_variants.emplace_back(variants[i]->permute(nested_perms[i], nested_limit));
}
}
/// We cannot use new_offsets column as an offset column, because it became invalid after variants permutation.

View File

@ -273,6 +273,25 @@ void SystemLogBase<LogElement>::startup()
saving_thread = std::make_unique<ThreadFromGlobalPool>([this] { savingThreadFunction(); });
}
template <typename LogElement>
void SystemLogBase<LogElement>::stopFlushThread()
{
{
std::lock_guard lock(thread_mutex);
if (!saving_thread || !saving_thread->joinable())
return;
if (is_shutdown)
return;
is_shutdown = true;
queue->shutdown();
}
saving_thread->join();
}
template <typename LogElement>
void SystemLogBase<LogElement>::add(LogElement element)
{

View File

@ -216,6 +216,8 @@ public:
static consteval bool shouldTurnOffLogger() { return false; }
protected:
void stopFlushThread() final;
std::shared_ptr<SystemLogQueue<LogElement>> queue;
};
}

View File

@ -1119,7 +1119,7 @@ class IColumn;
M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \
M(String, schema_inference_hints, "", "The list of column names and types to use in schema inference for formats without column names. The format: 'column_name1 column_type1, column_name2 column_type2, ...'", 0) \
M(SchemaInferenceMode, schema_inference_mode, "default", "Mode of schema inference. 'default' - assume that all files have the same schema and schema can be inferred from any file, 'union' - files can have different schemas and the resulting schema should be the a union of schemas of all files", 0) \
M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \
M(UInt64Auto, schema_inference_make_columns_nullable, 1, "If set to true, all inferred types will be Nullable in schema inference. When set to false, no columns will be converted to Nullable. When set to 'auto', ClickHouse will use information about nullability from the data.", 0) \
M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \
M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \

View File

@ -72,11 +72,13 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"24.9",
{
{"input_format_try_infer_variants", false, false, "Try to infer Variant type in text formats when there is more than one possible type for column/array elements"},
{"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
{"create_if_not_exists", false, false, "New setting."},
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
}
},
{"24.8",
{
{"create_if_not_exists", false, false, "New setting."},
{"rows_before_aggregation", true, true, "Provide exact value for rows_before_aggregation statistic, represents the number of rows read before aggregation"},
{"restore_replace_external_table_functions_to_null", false, false, "New setting."},
{"restore_replace_external_engines_to_null", false, false, "New setting."},
@ -85,7 +87,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"use_hive_partitioning", false, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines."},
{"allow_experimental_kafka_offsets_storage_in_keeper", false, false, "Allow the usage of experimental Kafka storage engine that stores the committed offsets in ClickHouse Keeper"},
{"allow_archive_path_syntax", true, true, "Added new setting to allow disabling archive path syntax."},
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
{"query_cache_tag", "", "", "New setting for labeling query cache settings."},
{"allow_experimental_time_series_table", false, false, "Added new setting to allow the TimeSeries table engine"},
{"enable_analyzer", 1, 1, "Added an alias to a setting `allow_experimental_analyzer`."},
@ -93,7 +94,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"allow_experimental_json_type", false, false, "Add new experimental JSON type"},
{"use_json_alias_for_old_object_type", true, false, "Use JSON type alias to create new JSON type"},
{"type_json_skip_duplicated_paths", false, false, "Allow to skip duplicated paths during JSON parsing"},
{"join_output_by_rowlist_perkey_rows_threshold", 0, 5, "The lower limit of per-key average rows in the right table to determine whether to output by row list in hash join."},
{"allow_experimental_vector_similarity_index", false, false, "Added new setting to allow experimental vector similarity indexes"},
{"input_format_try_infer_datetimes_only_datetime64", true, false, "Allow to infer DateTime instead of DateTime64 in data formats"}
}

View File

@ -22,7 +22,6 @@
#include <cstring>
#include <unistd.h>
#include <algorithm>
#include <typeinfo>
#include <iostream>
#include <memory>

View File

@ -43,39 +43,21 @@ bool LocalObjectStorage::exists(const StoredObject & object) const
std::unique_ptr<ReadBufferFromFileBase> LocalObjectStorage::readObjects( /// NOLINT
const StoredObjects & objects,
const ReadSettings & read_settings,
std::optional<size_t> read_hint,
std::optional<size_t> file_size) const
std::optional<size_t>,
std::optional<size_t>) const
{
auto modified_settings = patchSettings(read_settings);
auto global_context = Context::getGlobalContextInstance();
auto read_buffer_creator =
[=] (bool /* restricted_seek */, const StoredObject & object)
-> std::unique_ptr<ReadBufferFromFileBase>
{
return createReadBufferFromFileBase(object.remote_path, modified_settings, read_hint, file_size);
};
auto read_buffer_creator = [=](bool /* restricted_seek */, const StoredObject & object) -> std::unique_ptr<ReadBufferFromFileBase>
{ return std::make_unique<ReadBufferFromFile>(object.remote_path); };
switch (read_settings.remote_fs_method)
{
case RemoteFSReadMethod::read:
{
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */false);
}
case RemoteFSReadMethod::threadpool:
{
auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */true);
auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
return std::make_unique<AsynchronousBoundedReadBuffer>(
std::move(impl), reader, read_settings,
global_context->getAsyncReadCounters(),
global_context->getFilesystemReadPrefetchesLog());
}
}
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator),
objects,
"file:",
modified_settings,
global_context->getFilesystemCacheLog(),
/* use_external_buffer */ false);
}
ReadSettings LocalObjectStorage::patchSettings(const ReadSettings & read_settings) const

View File

@ -257,7 +257,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.max_bytes_to_read_for_schema_inference = settings.input_format_max_bytes_to_read_for_schema_inference;
format_settings.column_names_for_schema_inference = settings.column_names_for_schema_inference;
format_settings.schema_inference_hints = settings.schema_inference_hints;
format_settings.schema_inference_make_columns_nullable = settings.schema_inference_make_columns_nullable;
format_settings.schema_inference_make_columns_nullable = settings.schema_inference_make_columns_nullable.valueOr(2);
format_settings.mysql_dump.table_name = settings.input_format_mysql_dump_table_name;
format_settings.mysql_dump.map_column_names = settings.input_format_mysql_dump_map_column_names;
format_settings.sql_insert.max_batch_size = settings.output_format_sql_insert_max_batch_size;

View File

@ -77,7 +77,7 @@ struct FormatSettings
Raw
};
bool schema_inference_make_columns_nullable = true;
UInt64 schema_inference_make_columns_nullable = 1;
DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;

View File

@ -1344,7 +1344,11 @@ namespace
if (checkCharCaseInsensitive('n', buf))
{
if (checkStringCaseInsensitive("ull", buf))
return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
{
if (settings.schema_inference_make_columns_nullable == 0)
return std::make_shared<DataTypeNothing>();
return makeNullable(std::make_shared<DataTypeNothing>());
}
else if (checkStringCaseInsensitive("an", buf))
return std::make_shared<DataTypeFloat64>();
}

View File

@ -19,7 +19,9 @@
#include <Common/HashTable/Hash.h>
#if USE_SSL
# include <openssl/evp.h>
# include <openssl/md5.h>
# include <openssl/ripemd.h>
#endif
#include <bit>
@ -93,9 +95,9 @@ namespace impl
if (is_const)
i = 0;
assert(key0->size() == key1->size());
if (offsets != nullptr)
if (offsets != nullptr && i > 0)
{
const auto * const begin = offsets->begin();
const auto * const begin = std::upper_bound(offsets->begin(), offsets->end(), i - 1);
const auto * upper = std::upper_bound(begin, offsets->end(), i);
if (upper != offsets->end())
i = upper - begin;
@ -196,6 +198,34 @@ T combineHashesFunc(T t1, T t2)
return HashFunction::apply(reinterpret_cast<const char *>(hashes), sizeof(hashes));
}
#if USE_SSL
struct RipeMD160Impl
{
static constexpr auto name = "ripeMD160";
using ReturnType = UInt256;
static UInt256 apply(const char * begin, size_t size)
{
UInt8 digest[RIPEMD160_DIGEST_LENGTH];
RIPEMD160(reinterpret_cast<const unsigned char *>(begin), size, reinterpret_cast<unsigned char *>(digest));
std::reverse(digest, digest + RIPEMD160_DIGEST_LENGTH);
UInt256 res = 0;
std::memcpy(&res, digest, RIPEMD160_DIGEST_LENGTH);
return res;
}
static UInt256 combineHashes(UInt256 h1, UInt256 h2)
{
return combineHashesFunc<UInt256, RipeMD160Impl>(h1, h2);
}
static constexpr bool use_int_hash_for_pods = false;
};
#endif
struct SipHash64Impl
{
@ -1624,6 +1654,7 @@ using FunctionIntHash32 = FunctionIntHash<IntHash32Impl, NameIntHash32>;
using FunctionIntHash64 = FunctionIntHash<IntHash64Impl, NameIntHash64>;
#if USE_SSL
using FunctionHalfMD5 = FunctionAnyHash<HalfMD5Impl>;
using FunctionRipeMD160Hash = FunctionAnyHash<RipeMD160Impl>;
#endif
using FunctionSipHash128 = FunctionAnyHash<SipHash128Impl>;
using FunctionSipHash128Keyed = FunctionAnyHash<SipHash128KeyedImpl, true, SipHash128KeyedImpl::Key, SipHash128KeyedImpl::KeyColumns>;
@ -1652,6 +1683,7 @@ using FunctionXxHash64 = FunctionAnyHash<ImplXxHash64>;
using FunctionXXH3 = FunctionAnyHash<ImplXXH3>;
using FunctionWyHash64 = FunctionAnyHash<ImplWyHash64>;
}
#pragma clang diagnostic pop

View File

@ -0,0 +1,23 @@
#include "FunctionsHashing.h"
#include <Functions/FunctionFactory.h>
/// FunctionsHashing instantiations are separated into files FunctionsHashing*.cpp
/// to better parallelize the build procedure and avoid MSan build failure
/// due to excessive resource consumption.
namespace DB
{
#if USE_SSL
REGISTER_FUNCTION(HashingRipe)
{
factory.registerFunction<FunctionRipeMD160Hash>(FunctionDocumentation{
.description = "RIPEMD-160 hash function, primarily used in Bitcoin address generation.",
.examples{{"", "SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));", R"(
hex(ripeMD160('The quick brown fox jumps over the lazy dog'))
37F332F68DB77BD9D7EDD4969571AD671CF9DD3B
)"}},
.categories{"Hash"}});
}
#endif
}

View File

@ -284,12 +284,12 @@ void OrdinalDate::init(int64_t modified_julian_day)
bool OrdinalDate::tryInit(int64_t modified_julian_day)
{
/// This function supports day number from -678941 to 2973119 (which represent 0000-01-01 and 9999-12-31 respectively).
/// This function supports day number from -678941 to 2973483 (which represent 0000-01-01 and 9999-12-31 respectively).
if (modified_julian_day < -678941)
return false;
if (modified_julian_day > 2973119)
if (modified_julian_day > 2973483)
return false;
const auto a = modified_julian_day + 678575;

View File

@ -1598,6 +1598,9 @@ ColumnPtr FunctionArrayElement::executeTuple(const ColumnsWithTypeAndName & argu
const auto & tuple_columns = col_nested->getColumns();
size_t tuple_size = tuple_columns.size();
if (tuple_size == 0)
return ColumnTuple::create(input_rows_count);
const DataTypes & tuple_types = typeid_cast<const DataTypeTuple &>(
*typeid_cast<const DataTypeArray &>(*arguments[0].type).getNestedType()).getElements();

View File

@ -443,6 +443,7 @@ std::unique_ptr<ReadBuffer> ReadWriteBufferFromHTTP::initialize()
}
response.getCookies(cookies);
response.getHeaders(response_headers);
content_encoding = response.get("Content-Encoding", "");
// Remember file size. It'll be used to report eof in next nextImpl() call.
@ -680,6 +681,19 @@ std::string ReadWriteBufferFromHTTP::getResponseCookie(const std::string & name,
return def;
}
Map ReadWriteBufferFromHTTP::getResponseHeaders() const
{
Map map;
for (const auto & header : response_headers)
{
Tuple elem;
elem.emplace_back(header.first);
elem.emplace_back(header.second);
map.emplace_back(elem);
}
return map;
}
void ReadWriteBufferFromHTTP::setNextCallback(NextCallback next_callback_)
{
next_callback = next_callback_;

View File

@ -90,6 +90,9 @@ private:
std::unique_ptr<ReadBuffer> impl;
std::vector<Poco::Net::HTTPCookie> cookies;
std::map<String, String> response_headers;
HTTPHeaderEntries http_header_entries;
std::function<void(size_t)> next_callback;
@ -187,6 +190,8 @@ public:
HTTPFileInfo getFileInfo();
static HTTPFileInfo parseFileInfo(const Poco::Net::HTTPResponse & response, size_t requested_range_begin);
Map getResponseHeaders() const;
};
using ReadWriteBufferFromHTTPPtr = std::unique_ptr<ReadWriteBufferFromHTTP>;

View File

@ -787,7 +787,7 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
/// EC2MetadataService delay is in order of seconds so it only make sense to retry after a couple of seconds.
/// But the connection timeout should be small because there is the case when there is no IMDS at all,
/// like outside of the cloud, on your own machines.
aws_client_configuration.connectTimeoutMs = 10;
aws_client_configuration.connectTimeoutMs = 50;
aws_client_configuration.requestTimeoutMs = 1000;
aws_client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(1, 1000);

View File

@ -67,17 +67,8 @@ namespace
user.authentication_methods.emplace_back();
}
bool has_no_password_authentication_method = std::find_if(
user.authentication_methods.begin(),
user.authentication_methods.end(),
[](const AuthenticationData & authentication_method)
{
return authentication_method.getType() == AuthenticationType::NO_PASSWORD;
}) != user.authentication_methods.end();
// 1. an IDENTIFIED WITH will drop existing authentication methods in favor of new ones.
// 2. if the user contains an auth method of type NO_PASSWORD and another one is being added, NO_PASSWORD must be dropped
if (replace_authentication_methods || (has_no_password_authentication_method && !authentication_methods.empty()))
if (replace_authentication_methods)
{
user.authentication_methods.clear();
}
@ -109,6 +100,18 @@ namespace
user.authentication_methods.emplace_back(authentication_method);
}
bool has_no_password_authentication_method = std::find_if(user.authentication_methods.begin(),
user.authentication_methods.end(),
[](const AuthenticationData & authentication_data)
{
return authentication_data.getType() == AuthenticationType::NO_PASSWORD;
}) != user.authentication_methods.end();
if (has_no_password_authentication_method && user.authentication_methods.size() > 1)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Authentication method 'no_password' cannot co-exist with other authentication methods");
}
if (!query.alter)
{
for (const auto & authentication_method : user.authentication_methods)

View File

@ -893,6 +893,12 @@ ContextData::ContextData(const ContextData &o) :
{
}
void ContextData::resetSharedContext()
{
std::lock_guard<std::mutex> lock(mutex_shared_context);
shared = nullptr;
}
Context::Context() = default;
Context::Context(const Context & rhs) : ContextData(rhs), std::enable_shared_from_this<Context>(rhs) {}
@ -914,14 +920,6 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
return res;
}
void Context::initGlobal()
{
assert(!global_context_instance);
global_context_instance = shared_from_this();
DatabaseCatalog::init(shared_from_this());
EventNotifier::init();
}
SharedContextHolder Context::createShared()
{
return SharedContextHolder(std::make_unique<ContextSharedPart>());
@ -2692,7 +2690,11 @@ void Context::makeSessionContext()
void Context::makeGlobalContext()
{
initGlobal();
assert(!global_context_instance);
global_context_instance = shared_from_this();
DatabaseCatalog::init(shared_from_this());
EventNotifier::init();
global_context = shared_from_this();
}
@ -4088,8 +4090,13 @@ void Context::initializeTraceCollector()
}
/// Call after unexpected crash happen.
void Context::handleCrash() const TSA_NO_THREAD_SAFETY_ANALYSIS
void Context::handleCrash() const
{
std::lock_guard<std::mutex> lock(mutex_shared_context);
if (!shared)
return;
SharedLockGuard lock2(shared->mutex);
if (shared->system_logs)
shared->system_logs->handleCrash();
}

View File

@ -492,6 +492,8 @@ public:
KitchenSink kitchen_sink;
void resetSharedContext();
protected:
using SampleBlockCache = std::unordered_map<std::string, Block>;
mutable SampleBlockCache sample_block_cache;
@ -529,6 +531,10 @@ protected:
mutable ThrottlerPtr local_write_query_throttler; /// A query-wide throttler for local IO writes
mutable ThrottlerPtr backups_query_throttler; /// A query-wide throttler for BACKUPs
mutable std::mutex mutex_shared_context; /// mutex to avoid accessing destroyed shared context pointer
/// some Context methods can be called after the shared context is destroyed
/// example, Context::handleCrash() method - called from signal handler
};
/** A set of known objects that can be used in the query.
@ -1387,8 +1393,6 @@ private:
ExternalUserDefinedExecutableFunctionsLoader & getExternalUserDefinedExecutableFunctionsLoaderWithLock(const std::lock_guard<std::mutex> & lock);
void initGlobal();
void setUserID(const UUID & user_id_);
void setCurrentRolesImpl(const std::vector<UUID> & new_current_roles, bool throw_if_not_granted, bool skip_if_not_granted, const std::shared_ptr<const User> & user);

View File

@ -701,7 +701,6 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
col_decl.codec, column.type, sanity_check_compression_codecs, allow_experimental_codecs, enable_deflate_qpl_codec, enable_zstd_qat_codec);
}
column.statistics.column_name = column.name; /// We assign column name here for better exception error message.
if (col_decl.statistics_desc)
{
if (!skip_checks && !context_->getSettingsRef().allow_experimental_statistics)

View File

@ -10,7 +10,7 @@ void PeriodicLog<LogElement>::startCollect(size_t collect_interval_milliseconds_
{
collect_interval_milliseconds = collect_interval_milliseconds_;
is_shutdown_metric_thread = false;
flush_thread = std::make_unique<ThreadFromGlobalPool>([this] { threadFunction(); });
collecting_thread = std::make_unique<ThreadFromGlobalPool>([this] { threadFunction(); });
}
template <typename LogElement>
@ -19,15 +19,15 @@ void PeriodicLog<LogElement>::stopCollect()
bool old_val = false;
if (!is_shutdown_metric_thread.compare_exchange_strong(old_val, true))
return;
if (flush_thread)
flush_thread->join();
if (collecting_thread)
collecting_thread->join();
}
template <typename LogElement>
void PeriodicLog<LogElement>::shutdown()
{
stopCollect();
this->stopFlushThread();
Base::shutdown();
}
template <typename LogElement>

View File

@ -17,6 +17,7 @@ template <typename LogElement>
class PeriodicLog : public SystemLog<LogElement>
{
using SystemLog<LogElement>::SystemLog;
using Base = SystemLog<LogElement>;
public:
using TimePoint = std::chrono::system_clock::time_point;
@ -24,18 +25,18 @@ public:
/// Launches a background thread to collect metrics with interval
void startCollect(size_t collect_interval_milliseconds_);
/// Stop background thread
void stopCollect();
void shutdown() final;
protected:
/// Stop background thread
void stopCollect();
virtual void stepFunction(TimePoint current_time) = 0;
private:
void threadFunction();
std::unique_ptr<ThreadFromGlobalPool> flush_thread;
std::unique_ptr<ThreadFromGlobalPool> collecting_thread;
size_t collect_interval_milliseconds;
std::atomic<bool> is_shutdown_metric_thread{false};
};

View File

@ -402,32 +402,13 @@ SystemLog<LogElement>::SystemLog(
template <typename LogElement>
void SystemLog<LogElement>::shutdown()
{
stopFlushThread();
Base::stopFlushThread();
auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext());
if (table)
table->flushAndShutdown();
}
template <typename LogElement>
void SystemLog<LogElement>::stopFlushThread()
{
{
std::lock_guard lock(thread_mutex);
if (!saving_thread || !saving_thread->joinable())
return;
if (is_shutdown)
return;
is_shutdown = true;
queue->shutdown();
}
saving_thread->join();
}
template <typename LogElement>
void SystemLog<LogElement>::savingThreadFunction()

View File

@ -125,8 +125,6 @@ public:
void shutdown() override;
void stopFlushThread() override;
/** Creates new table if it does not exist.
* Renames old table if its structure is not suitable.
* This cannot be done in constructor to avoid deadlock while renaming a table under locked Context when SystemLog object is created.
@ -136,9 +134,6 @@ public:
protected:
LoggerPtr log;
using ISystemLog::is_shutdown;
using ISystemLog::saving_thread;
using ISystemLog::thread_mutex;
using Base::queue;
StoragePtr getStorage() const;

View File

@ -25,11 +25,6 @@
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
namespace
{
bool parseRenameTo(IParserBase::Pos & pos, Expected & expected, std::optional<String> & new_name)
@ -53,7 +48,8 @@ namespace
Expected & expected,
std::shared_ptr<ASTAuthenticationData> & auth_data,
bool is_type_specifier_mandatory,
bool is_type_specifier_allowed)
bool is_type_specifier_allowed,
bool should_parse_no_password)
{
return IParserBase::wrapParseImpl(pos, [&]
{
@ -67,7 +63,7 @@ namespace
bool expect_public_ssh_key = false;
bool expect_http_auth_server = false;
for (auto check_type : collections::range(AuthenticationType::MAX))
auto parse_non_password_based_type = [&](auto check_type)
{
if (ParserKeyword{AuthenticationTypeInfo::get(check_type).keyword}.ignore(pos, expected))
{
@ -86,7 +82,20 @@ namespace
else if (check_type != AuthenticationType::NO_PASSWORD)
expect_password = true;
break;
return true;
}
return false;
};
{
const auto first_authentication_type_element_to_check
= should_parse_no_password ? AuthenticationType::NO_PASSWORD : AuthenticationType::PLAINTEXT_PASSWORD;
for (auto check_type : collections::range(first_authentication_type_element_to_check, AuthenticationType::MAX))
{
if (parse_non_password_based_type(check_type))
break;
}
}
@ -219,7 +228,11 @@ namespace
}
bool parseIdentifiedWith(IParserBase::Pos & pos, Expected & expected, std::vector<std::shared_ptr<ASTAuthenticationData>> & authentication_methods)
bool parseIdentifiedWith(
IParserBase::Pos & pos,
Expected & expected,
std::vector<std::shared_ptr<ASTAuthenticationData>> & authentication_methods,
bool should_parse_no_password)
{
return IParserBase::wrapParseImpl(pos, [&]
{
@ -232,7 +245,7 @@ namespace
std::shared_ptr<ASTAuthenticationData> ast_authentication_data;
if (!parseAuthenticationData(pos, expected, ast_authentication_data, is_type_specifier_mandatory, is_type_specifier_mandatory))
if (!parseAuthenticationData(pos, expected, ast_authentication_data, is_type_specifier_mandatory, is_type_specifier_mandatory, should_parse_no_password))
{
return false;
}
@ -248,7 +261,7 @@ namespace
{
std::shared_ptr<ASTAuthenticationData> ast_authentication_data;
if (!parseAuthenticationData(aux_pos, expected, ast_authentication_data, false, true))
if (!parseAuthenticationData(aux_pos, expected, ast_authentication_data, false, true, should_parse_no_password))
{
break;
}
@ -273,7 +286,7 @@ namespace
return true;
}
return parseIdentifiedWith(pos, expected, authentication_methods);
return parseIdentifiedWith(pos, expected, authentication_methods, true);
});
}
@ -480,7 +493,7 @@ namespace
return false;
}
return parseIdentifiedWith(pos, expected, auth_data);
return parseIdentifiedWith(pos, expected, auth_data, false);
});
}
@ -551,29 +564,19 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
while (true)
{
if (auth_data.empty())
if (auth_data.empty() && !reset_authentication_methods_to_new)
{
parsed_identified_with = parseIdentifiedOrNotIdentified(pos, expected, auth_data);
if (!parsed_identified_with)
if (!parsed_identified_with && alter)
{
parsed_add_identified_with = parseAddIdentifiedWith(pos, expected, auth_data);
if (parsed_add_identified_with && !alter)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Create user query is not allowed to have ADD IDENTIFIED, remove the ADD keyword.");
}
}
}
if (!reset_authentication_methods_to_new)
if (!reset_authentication_methods_to_new && alter && auth_data.empty())
{
reset_authentication_methods_to_new = parseResetAuthenticationMethods(pos, expected);
if (reset_authentication_methods_to_new && !alter)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "RESET AUTHENTICATION METHODS TO NEW can only be used on ALTER statement");
}
}
if (!valid_until)
@ -640,31 +643,6 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
break;
}
bool has_no_password_authentication_method = std::find_if(
auth_data.begin(),
auth_data.end(),
[](const std::shared_ptr<ASTAuthenticationData> & ast_authentication_data)
{
return ast_authentication_data->type == AuthenticationType::NO_PASSWORD;
}) != auth_data.end();
if (has_no_password_authentication_method && auth_data.size() > 1)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Authentication method 'no_password' cannot co-exist with other authentication methods.");
}
if (has_no_password_authentication_method && parsed_add_identified_with)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Authentication method 'no_password' cannot co-exist with other authentication methods. "
"Use 'ALTER USER xyz IDENTIFIED WITH no_password' to replace existing authentication methods");
}
if (reset_authentication_methods_to_new && !auth_data.empty())
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "RESET AUTHENTICATION METHODS TO NEW cannot be used along with [ADD] IDENTIFIED clauses");
}
if (!alter && !hosts)
{
String common_host_pattern;
@ -700,7 +678,7 @@ bool ParserCreateUserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->storage_name = std::move(storage_name);
query->reset_authentication_methods_to_new = reset_authentication_methods_to_new;
query->add_identified_with = parsed_add_identified_with;
query->replace_authentication_methods = parsed_identified_with || has_no_password_authentication_method;
query->replace_authentication_methods = parsed_identified_with;
for (const auto & authentication_method : query->authentication_methods)
{

View File

@ -54,13 +54,8 @@ void checkFinalInferredType(
type = default_type;
}
if (settings.schema_inference_make_columns_nullable)
if (settings.schema_inference_make_columns_nullable == 1)
type = makeNullableRecursively(type);
/// In case when data for some column could contain nulls and regular values,
/// resulting inferred type is Nullable.
/// If input_format_null_as_default is enabled, we should remove Nullable type.
else if (settings.null_as_default)
type = removeNullable(type);
}
void ISchemaReader::transformTypesIfNeeded(DB::DataTypePtr & type, DB::DataTypePtr & new_type)

View File

@ -204,8 +204,11 @@ NamesAndTypesList ArrowSchemaReader::readSchema()
schema = file_reader->schema();
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
*schema, stream ? "ArrowStream" : "Arrow", format_settings.arrow.skip_columns_with_unsupported_types_in_schema_inference);
if (format_settings.schema_inference_make_columns_nullable)
*schema,
stream ? "ArrowStream" : "Arrow",
format_settings.arrow.skip_columns_with_unsupported_types_in_schema_inference,
format_settings.schema_inference_make_columns_nullable != 0);
if (format_settings.schema_inference_make_columns_nullable == 1)
return getNamesAndRecursivelyNullableTypes(header);
return header.getNamesAndTypesList();
}

View File

@ -727,6 +727,7 @@ struct ReadColumnFromArrowColumnSettings
FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior;
bool allow_arrow_null_type;
bool skip_columns_with_unsupported_types;
bool allow_inferring_nullable_columns;
};
static ColumnWithTypeAndName readColumnFromArrowColumn(
@ -1109,7 +1110,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn(
bool is_map_nested_column,
const ReadColumnFromArrowColumnSettings & settings)
{
bool read_as_nullable_column = arrow_column->null_count() || is_nullable_column || (type_hint && type_hint->isNullable());
bool read_as_nullable_column = (arrow_column->null_count() || is_nullable_column || (type_hint && type_hint->isNullable())) && settings.allow_inferring_nullable_columns;
if (read_as_nullable_column &&
arrow_column->type()->id() != arrow::Type::LIST &&
arrow_column->type()->id() != arrow::Type::LARGE_LIST &&
@ -1173,14 +1174,16 @@ static std::shared_ptr<arrow::ChunkedArray> createArrowColumn(const std::shared_
Block ArrowColumnToCHColumn::arrowSchemaToCHHeader(
const arrow::Schema & schema,
const std::string & format_name,
bool skip_columns_with_unsupported_types)
bool skip_columns_with_unsupported_types,
bool allow_inferring_nullable_columns)
{
ReadColumnFromArrowColumnSettings settings
{
.format_name = format_name,
.date_time_overflow_behavior = FormatSettings::DateTimeOverflowBehavior::Ignore,
.allow_arrow_null_type = false,
.skip_columns_with_unsupported_types = skip_columns_with_unsupported_types
.skip_columns_with_unsupported_types = skip_columns_with_unsupported_types,
.allow_inferring_nullable_columns = allow_inferring_nullable_columns,
};
ColumnsWithTypeAndName sample_columns;
@ -1254,7 +1257,8 @@ Chunk ArrowColumnToCHColumn::arrowColumnsToCHChunk(const NameToArrowColumn & nam
.format_name = format_name,
.date_time_overflow_behavior = date_time_overflow_behavior,
.allow_arrow_null_type = true,
.skip_columns_with_unsupported_types = false
.skip_columns_with_unsupported_types = false,
.allow_inferring_nullable_columns = true
};
Columns columns;

View File

@ -34,7 +34,8 @@ public:
static Block arrowSchemaToCHHeader(
const arrow::Schema & schema,
const std::string & format_name,
bool skip_columns_with_unsupported_types = false);
bool skip_columns_with_unsupported_types = false,
bool allow_inferring_nullable_columns = true);
struct DictionaryInfo
{

View File

@ -15,8 +15,8 @@ namespace ErrorCodes
}
template <bool with_defaults>
BinaryRowInputFormat<with_defaults>::BinaryRowInputFormat(ReadBuffer & in_, const Block & header, Params params_, bool with_names_, bool with_types_, const FormatSettings & format_settings_)
: RowInputFormatWithNamesAndTypes(
BinaryRowInputFormat<with_defaults>::BinaryRowInputFormat(ReadBuffer & in_, const Block & header, IRowInputFormat::Params params_, bool with_names_, bool with_types_, const FormatSettings & format_settings_)
: RowInputFormatWithNamesAndTypes<BinaryFormatReader<with_defaults>>(
header,
in_,
params_,

View File

@ -10,13 +10,16 @@ namespace DB
class ReadBuffer;
template <bool>
class BinaryFormatReader;
/** A stream for inputting data in a binary line-by-line format.
*/
template <bool with_defaults = false>
class BinaryRowInputFormat final : public RowInputFormatWithNamesAndTypes
class BinaryRowInputFormat final : public RowInputFormatWithNamesAndTypes<BinaryFormatReader<with_defaults>>
{
public:
BinaryRowInputFormat(ReadBuffer & in_, const Block & header, Params params_, bool with_names_, bool with_types_, const FormatSettings & format_settings_);
BinaryRowInputFormat(ReadBuffer & in_, const Block & header, IRowInputFormat::Params params_, bool with_names_, bool with_types_, const FormatSettings & format_settings_);
String getName() const override { return "BinaryRowInputFormat"; }

View File

@ -61,7 +61,7 @@ CSVRowInputFormat::CSVRowInputFormat(
bool with_names_,
bool with_types_,
const FormatSettings & format_settings_,
std::unique_ptr<FormatWithNamesAndTypesReader> format_reader_)
std::unique_ptr<CSVFormatReader> format_reader_)
: RowInputFormatWithNamesAndTypes(
header_,
*in_,

View File

@ -1,7 +1,6 @@
#pragma once
#include <optional>
#include <unordered_map>
#include <Core/Block.h>
#include <Processors/Formats/RowInputFormatWithNamesAndTypes.h>
@ -13,10 +12,12 @@
namespace DB
{
class CSVFormatReader;
/** A stream for inputting data in csv format.
* Does not conform with https://tools.ietf.org/html/rfc4180 because it skips spaces and tabs between values.
*/
class CSVRowInputFormat : public RowInputFormatWithNamesAndTypes
class CSVRowInputFormat : public RowInputFormatWithNamesAndTypes<CSVFormatReader>
{
public:
/** with_names - in the first line the header with column names
@ -32,7 +33,7 @@ public:
protected:
CSVRowInputFormat(const Block & header_, std::shared_ptr<PeekableReadBuffer> in_, const Params & params_,
bool with_names_, bool with_types_, const FormatSettings & format_settings_, std::unique_ptr<FormatWithNamesAndTypesReader> format_reader_);
bool with_names_, bool with_types_, const FormatSettings & format_settings_, std::unique_ptr<CSVFormatReader> format_reader_);
CSVRowInputFormat(const Block & header_, std::shared_ptr<PeekableReadBuffer> in_buf_, const Params & params_,
bool with_names_, bool with_types_, const FormatSettings & format_settings_);

View File

@ -9,7 +9,8 @@
namespace DB
{
class CustomSeparatedRowInputFormat final : public RowInputFormatWithNamesAndTypes
class CustomSeparatedFormatReader;
class CustomSeparatedRowInputFormat final : public RowInputFormatWithNamesAndTypes<CustomSeparatedFormatReader>
{
public:
CustomSeparatedRowInputFormat(

View File

@ -11,7 +11,7 @@ namespace DB
{
class ReadBuffer;
class JSONCompactEachRowFormatReader;
/** A stream for reading data in a bunch of formats:
* - JSONCompactEachRow
@ -20,7 +20,7 @@ class ReadBuffer;
* - JSONCompactStringsEachRowWithNamesAndTypes
*
*/
class JSONCompactEachRowRowInputFormat final : public RowInputFormatWithNamesAndTypes
class JSONCompactEachRowRowInputFormat final : public RowInputFormatWithNamesAndTypes<JSONCompactEachRowFormatReader>
{
public:
JSONCompactEachRowRowInputFormat(

View File

@ -14,7 +14,7 @@ namespace ErrorCodes
JSONCompactRowInputFormat::JSONCompactRowInputFormat(
const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_)
: RowInputFormatWithNamesAndTypes(
: RowInputFormatWithNamesAndTypes<JSONCompactFormatReader>(
header_, in_, params_, false, false, false, format_settings_, std::make_unique<JSONCompactFormatReader>(in_, format_settings_))
{
}

View File

@ -5,8 +5,8 @@
namespace DB
{
class JSONCompactRowInputFormat final : public RowInputFormatWithNamesAndTypes
class JSONCompactFormatReader;
class JSONCompactRowInputFormat final : public RowInputFormatWithNamesAndTypes<JSONCompactFormatReader>
{
public:
JSONCompactRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_);

View File

@ -1002,7 +1002,7 @@ NamesAndTypesList NativeORCSchemaReader::readSchema()
header.insert(ColumnWithTypeAndName{type, name});
}
if (format_settings.schema_inference_make_columns_nullable)
if (format_settings.schema_inference_make_columns_nullable == 1)
return getNamesAndRecursivelyNullableTypes(header);
return header.getNamesAndTypesList();
}

View File

@ -160,8 +160,11 @@ NamesAndTypesList ORCSchemaReader::readSchema()
{
initializeIfNeeded();
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
*schema, "ORC", format_settings.orc.skip_columns_with_unsupported_types_in_schema_inference);
if (format_settings.schema_inference_make_columns_nullable)
*schema,
"ORC",
format_settings.orc.skip_columns_with_unsupported_types_in_schema_inference,
format_settings.schema_inference_make_columns_nullable != 0);
if (format_settings.schema_inference_make_columns_nullable == 1)
return getNamesAndRecursivelyNullableTypes(header);
return header.getNamesAndTypesList();
}

View File

@ -869,8 +869,11 @@ NamesAndTypesList ParquetSchemaReader::readSchema()
THROW_ARROW_NOT_OK(parquet::arrow::FromParquetSchema(metadata->schema(), &schema));
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
*schema, "Parquet", format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference);
if (format_settings.schema_inference_make_columns_nullable)
*schema,
"Parquet",
format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference,
format_settings.schema_inference_make_columns_nullable != 0);
if (format_settings.schema_inference_make_columns_nullable == 1)
return getNamesAndRecursivelyNullableTypes(header);
return header.getNamesAndTypesList();
}
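
Note the recurring pattern in the ORC and Parquet schema readers above: `schema_inference_make_columns_nullable` is now treated as a tri-state rather than a boolean. A hedged sketch of the convention (the enum is a hypothetical rendering of the raw setting value):

```cpp
// Hedged sketch: 0 = never infer Nullable, 1 = force Nullable recursively,
// anything else = let the data decide. The enum is hypothetical.
enum class MakeColumnsNullable { Never = 0, Always = 1, Auto = 2 };

// Passed to arrowSchemaToCHHeader as allow_inferring_nullable_columns:
bool allowInferringNullable(MakeColumnsNullable mode)
{
    return mode != MakeColumnsNullable::Never;   // the "!= 0" checks above
}

// Decides the post-pass getNamesAndRecursivelyNullableTypes(header):
bool forceNullableRecursively(MakeColumnsNullable mode)
{
    return mode == MakeColumnsNullable::Always;  // the "== 1" checks above
}
```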

View File

@ -10,9 +10,11 @@
namespace DB
{
class TabSeparatedFormatReader;
/** A stream to input data in tsv format.
*/
class TabSeparatedRowInputFormat final : public RowInputFormatWithNamesAndTypes
class TabSeparatedRowInputFormat final : public RowInputFormatWithNamesAndTypes<TabSeparatedFormatReader>
{
public:
/** with_names - the first line is the header with the names of the columns

View File

@ -1,14 +1,20 @@
#include <Processors/Formats/RowInputFormatWithNamesAndTypes.h>
#include <Processors/Formats/ISchemaReader.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/PeekableReadBuffer.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeNullable.h>
#include <Formats/EscapingRuleUtils.h>
#include <IO/Operators.h>
#include <IO/PeekableReadBuffer.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <Processors/Formats/ISchemaReader.h>
#include <Processors/Formats/Impl/BinaryRowInputFormat.h>
#include <Processors/Formats/Impl/CSVRowInputFormat.h>
#include <Processors/Formats/Impl/CustomSeparatedRowInputFormat.h>
#include <Processors/Formats/Impl/HiveTextRowInputFormat.h>
#include <Processors/Formats/Impl/JSONCompactRowInputFormat.h>
#include <Processors/Formats/Impl/TabSeparatedRowInputFormat.h>
#include <Processors/Formats/RowInputFormatWithNamesAndTypes.h>
namespace DB
@ -44,7 +50,8 @@ namespace
}
}
RowInputFormatWithNamesAndTypes::RowInputFormatWithNamesAndTypes(
template <typename FormatReaderImpl>
RowInputFormatWithNamesAndTypes<FormatReaderImpl>::RowInputFormatWithNamesAndTypes(
const Block & header_,
ReadBuffer & in_,
const Params & params_,
@ -52,7 +59,7 @@ RowInputFormatWithNamesAndTypes::RowInputFormatWithNamesAndTypes(
bool with_names_,
bool with_types_,
const FormatSettings & format_settings_,
std::unique_ptr<FormatWithNamesAndTypesReader> format_reader_,
std::unique_ptr<FormatReaderImpl> format_reader_,
bool try_detect_header_)
: RowInputFormatWithDiagnosticInfo(header_, in_, params_)
, format_settings(format_settings_)
@ -66,7 +73,8 @@ RowInputFormatWithNamesAndTypes::RowInputFormatWithNamesAndTypes(
column_indexes_by_names = getPort().getHeader().getNamesToIndexesMap();
}
void RowInputFormatWithNamesAndTypes::readPrefix()
template <typename FormatReaderImpl>
void RowInputFormatWithNamesAndTypes<FormatReaderImpl>::readPrefix()
{
/// Search and remove BOM only in textual formats (CSV, TSV etc), not in binary ones (RowBinary*).
/// Also, we assume that column name or type cannot contain BOM, so, if format has header,
@ -138,7 +146,8 @@ void RowInputFormatWithNamesAndTypes::readPrefix()
}
}
void RowInputFormatWithNamesAndTypes::tryDetectHeader(std::vector<String> & column_names_out, std::vector<String> & type_names_out)
template <typename FormatReaderImpl>
void RowInputFormatWithNamesAndTypes<FormatReaderImpl>::tryDetectHeader(std::vector<String> & column_names_out, std::vector<String> & type_names_out)
{
auto & read_buf = getReadBuffer();
PeekableReadBuffer * peekable_buf = dynamic_cast<PeekableReadBuffer *>(&read_buf);
@ -201,7 +210,8 @@ void RowInputFormatWithNamesAndTypes::tryDetectHeader(std::vector<String> & colu
peekable_buf->dropCheckpoint();
}
bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadExtension & ext)
template <typename FormatReaderImpl>
bool RowInputFormatWithNamesAndTypes<FormatReaderImpl>::readRow(MutableColumns & columns, RowReadExtension & ext)
{
if (unlikely(end_of_stream))
return false;
@ -280,7 +290,8 @@ bool RowInputFormatWithNamesAndTypes::readRow(MutableColumns & columns, RowReadE
return true;
}
size_t RowInputFormatWithNamesAndTypes::countRows(size_t max_block_size)
template <typename FormatReaderImpl>
size_t RowInputFormatWithNamesAndTypes<FormatReaderImpl>::countRows(size_t max_block_size)
{
if (unlikely(end_of_stream))
return 0;
@ -304,7 +315,8 @@ size_t RowInputFormatWithNamesAndTypes::countRows(size_t max_block_size)
return num_rows;
}
void RowInputFormatWithNamesAndTypes::resetParser()
template <typename FormatReaderImpl>
void RowInputFormatWithNamesAndTypes<FormatReaderImpl>::resetParser()
{
RowInputFormatWithDiagnosticInfo::resetParser();
column_mapping->column_indexes_for_input_fields.clear();
@ -313,7 +325,8 @@ void RowInputFormatWithNamesAndTypes::resetParser()
end_of_stream = false;
}
void RowInputFormatWithNamesAndTypes::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column)
template <typename FormatReaderImpl>
void RowInputFormatWithNamesAndTypes<FormatReaderImpl>::tryDeserializeField(const DataTypePtr & type, IColumn & column, size_t file_column)
{
const auto & index = column_mapping->column_indexes_for_input_fields[file_column];
if (index)
@ -328,7 +341,8 @@ void RowInputFormatWithNamesAndTypes::tryDeserializeField(const DataTypePtr & ty
}
}
bool RowInputFormatWithNamesAndTypes::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out)
template <typename FormatReaderImpl>
bool RowInputFormatWithNamesAndTypes<FormatReaderImpl>::parseRowAndPrintDiagnosticInfo(MutableColumns & columns, WriteBuffer & out)
{
if (in->eof())
{
@ -374,12 +388,14 @@ bool RowInputFormatWithNamesAndTypes::parseRowAndPrintDiagnosticInfo(MutableColu
return format_reader->parseRowEndWithDiagnosticInfo(out);
}
bool RowInputFormatWithNamesAndTypes::isGarbageAfterField(size_t index, ReadBuffer::Position pos)
template <typename FormatReaderImpl>
bool RowInputFormatWithNamesAndTypes<FormatReaderImpl>::isGarbageAfterField(size_t index, ReadBuffer::Position pos)
{
return format_reader->isGarbageAfterField(index, pos);
}
void RowInputFormatWithNamesAndTypes::setReadBuffer(ReadBuffer & in_)
template <typename FormatReaderImpl>
void RowInputFormatWithNamesAndTypes<FormatReaderImpl>::setReadBuffer(ReadBuffer & in_)
{
format_reader->setReadBuffer(in_);
IInputFormat::setReadBuffer(in_);
@ -582,5 +598,12 @@ void FormatWithNamesAndTypesSchemaReader::transformTypesIfNeeded(DB::DataTypePtr
transformInferredTypesIfNeeded(type, new_type, format_settings);
}
template class RowInputFormatWithNamesAndTypes<JSONCompactFormatReader>;
template class RowInputFormatWithNamesAndTypes<JSONCompactEachRowFormatReader>;
template class RowInputFormatWithNamesAndTypes<TabSeparatedFormatReader>;
template class RowInputFormatWithNamesAndTypes<CSVFormatReader>;
template class RowInputFormatWithNamesAndTypes<CustomSeparatedFormatReader>;
template class RowInputFormatWithNamesAndTypes<BinaryFormatReader<true>>;
template class RowInputFormatWithNamesAndTypes<BinaryFormatReader<false>>;
}
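
`RowInputFormatWithNamesAndTypes` is now a class template over its reader, replacing virtual dispatch through `FormatWithNamesAndTypesReader` with static dispatch. Because the member definitions stay in the `.cpp`, every reader type must appear in the explicit instantiation list above, or uses elsewhere fail to link. A self-contained toy of the same pattern:

```cpp
// widget.h -- hedged toy of the explicit-instantiation pattern used above.
template <typename Reader>
class Widget
{
public:
    void run();  // defined only in widget.cpp
};

// widget.cpp
template <typename Reader>
void Widget<Reader>::run()
{
    // Reader is known statically here: no virtual call needed.
}

struct CsvReader {};
struct TsvReader {};

// Without these lines, a translation unit calling Widget<CsvReader>::run()
// compiles but fails at link time with an undefined symbol.
template class Widget<CsvReader>;
template class Widget<TsvReader>;
```

The trade-off is a closed list of supported readers in exchange for devirtualized per-row calls on a hot path.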

View File

@ -26,6 +26,7 @@ class FormatWithNamesAndTypesReader;
/// will be compared with types from the header.
/// It's important that this class first reads/skips names and only
/// then reads/skips types. So you can rely on this invariant.
template <typename FormatReaderImpl>
class RowInputFormatWithNamesAndTypes : public RowInputFormatWithDiagnosticInfo
{
protected:
@ -41,7 +42,7 @@ protected:
bool with_names_,
bool with_types_,
const FormatSettings & format_settings_,
std::unique_ptr<FormatWithNamesAndTypesReader> format_reader_,
std::unique_ptr<FormatReaderImpl> format_reader_,
bool try_detect_header_ = false);
void resetParser() override;
@ -70,7 +71,7 @@ private:
bool is_header_detected = false;
protected:
std::unique_ptr<FormatWithNamesAndTypesReader> format_reader;
std::unique_ptr<FormatReaderImpl> format_reader;
Block::NameMap column_indexes_by_names;
};
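
Downstream, each format now names its concrete reader in the base-class argument, as the CSV/TSV/JSON/Binary headers above do. A hedged skeleton of what a new format would look like (`MyFormatReader` is hypothetical, the boolean parameter names are assumptions, and the fragment presumes ClickHouse's headers rather than compiling standalone):

```cpp
#include <Processors/Formats/RowInputFormatWithNamesAndTypes.h>

class MyFormatReader;  // would derive from FormatWithNamesAndTypesReader

class MyRowInputFormat final : public RowInputFormatWithNamesAndTypes<MyFormatReader>
{
public:
    MyRowInputFormat(ReadBuffer & in_, const Block & header_,
                     IRowInputFormat::Params params_, const FormatSettings & settings_)
        : RowInputFormatWithNamesAndTypes<MyFormatReader>(
            header_, in_, params_,
            /*is_binary=*/ false, /*with_names=*/ true, /*with_types=*/ false,
            settings_, std::make_unique<MyFormatReader>(in_, settings_))
    {
    }

    String getName() const override { return "MyRowInputFormat"; }
};

// Remember: the new specialization must also be instantiated explicitly in
// RowInputFormatWithNamesAndTypes.cpp, e.g.
// template class RowInputFormatWithNamesAndTypes<MyFormatReader>;
```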

View File

@ -255,7 +255,7 @@ void buildSortingDAG(QueryPlan::Node & node, std::optional<ActionsDAG> & dag, Fi
/// Add more functions to fixed columns.
/// A function's result is fixed if all its arguments are fixed or constant.
void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
{
struct Frame
{
@ -300,20 +300,20 @@ void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
{
if (frame.node->function_base->isDeterministicInScopeOfQuery())
{
//std::cerr << "*** enreachFixedColumns check " << frame.node->result_name << std::endl;
//std::cerr << "*** enrichFixedColumns check " << frame.node->result_name << std::endl;
bool all_args_fixed_or_const = true;
for (const auto * child : frame.node->children)
{
if (!child->column && !fixed_columns.contains(child))
{
//std::cerr << "*** enreachFixedColumns fail " << child->result_name << ' ' << static_cast<const void *>(child) << std::endl;
//std::cerr << "*** enrichFixedColumns fail " << child->result_name << ' ' << static_cast<const void *>(child) << std::endl;
all_args_fixed_or_const = false;
}
}
if (all_args_fixed_or_const)
{
//std::cerr << "*** enreachFixedColumns add " << frame.node->result_name << ' ' << static_cast<const void *>(frame.node) << std::endl;
//std::cerr << "*** enrichFixedColumns add " << frame.node->result_name << ' ' << static_cast<const void *>(frame.node) << std::endl;
fixed_columns.insert(frame.node);
}
}
@ -357,7 +357,7 @@ InputOrderInfoPtr buildInputOrderInfo(
}
}
enreachFixedColumns(sorting_key_dag, fixed_key_columns);
enrichFixedColumns(sorting_key_dag, fixed_key_columns);
}
/// This is a result direction we will read from MergeTree
@ -530,7 +530,7 @@ AggregationInputOrder buildInputOrderInfo(
}
}
enreachFixedColumns(sorting_key_dag, fixed_key_columns);
enrichFixedColumns(sorting_key_dag, fixed_key_columns);
for (const auto * output : dag->getOutputs())
{
@ -804,7 +804,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n
buildSortingDAG(node, dag, fixed_columns, limit);
if (dag && !fixed_columns.empty())
enreachFixedColumns(*dag, fixed_columns);
enrichFixedColumns(*dag, fixed_columns);
if (auto * reading = typeid_cast<ReadFromMergeTree *>(reading_node->step.get()))
{
@ -858,7 +858,7 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl
buildSortingDAG(node, dag, fixed_columns, limit);
if (dag && !fixed_columns.empty())
enreachFixedColumns(*dag, fixed_columns);
enrichFixedColumns(*dag, fixed_columns);
if (auto * reading = typeid_cast<ReadFromMergeTree *>(reading_node->step.get()))
{
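
Beyond the rename, `enrichFixedColumns` computes a closure: a deterministic function node becomes fixed once every child is fixed or constant. A self-contained toy of that rule over node indices (a fixed-point loop standing in for the single DFS pass in the real code):

```cpp
#include <cstddef>
#include <set>
#include <vector>

// Hedged toy of the closure computed by enrichFixedColumns: children[i] lists
// the argument nodes of node i (empty = input or constant leaf), is_const
// marks constant-folded nodes, and `fixed` starts as the filter-fixed columns.
void enrichFixed(const std::vector<std::vector<std::size_t>> & children,
                 const std::vector<bool> & is_const,
                 std::set<std::size_t> & fixed)
{
    bool changed = true;
    while (changed)  // iterate until no node can be added
    {
        changed = false;
        for (std::size_t node = 0; node < children.size(); ++node)
        {
            if (fixed.contains(node) || children[node].empty())
                continue;
            bool all_fixed_or_const = true;
            for (std::size_t child : children[node])
                if (!is_const[child] && !fixed.contains(child))
                    all_fixed_or_const = false;
            if (all_fixed_or_const)
            {
                fixed.insert(node);
                changed = true;
            }
        }
    }
}
```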

View File

@ -706,9 +706,9 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
}
auto stats_vec = ColumnStatisticsDescription::fromAST(statistics_decl, metadata.columns);
for (const auto & stats : stats_vec)
for (const auto & [stats_column_name, stats] : stats_vec)
{
metadata.columns.modify(stats.column_name,
metadata.columns.modify(stats_column_name,
[&](ColumnDescription & column) { column.statistics.merge(stats, column.name, column.type, if_not_exists); });
}
}
@ -735,14 +735,14 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
{
if (!metadata.columns.has(statistics_column_name))
{
throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Cannot add statistics for column {}: this column is not found", statistics_column_name);
throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Cannot modify statistics for column {}: this column is not found", statistics_column_name);
}
}
auto stats_vec = ColumnStatisticsDescription::fromAST(statistics_decl, metadata.columns);
for (const auto & stats : stats_vec)
for (const auto & [stats_column_name, stats] : stats_vec)
{
metadata.columns.modify(stats.column_name,
metadata.columns.modify(stats_column_name,
[&](ColumnDescription & column) { column.statistics.assign(stats); });
}
}
@ -867,8 +867,6 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
rename_visitor.visit(column_to_modify.default_desc.expression);
if (column_to_modify.ttl)
rename_visitor.visit(column_to_modify.ttl);
if (column_to_modify.name == column_name && !column_to_modify.statistics.empty())
column_to_modify.statistics.column_name = rename_to;
});
}
if (metadata.table_ttl.definition_ast)

View File

@ -113,7 +113,15 @@ bool ColumnDescription::operator==(const ColumnDescription & other) const
&& ast_to_str(ttl) == ast_to_str(other.ttl);
}
void ColumnDescription::writeText(WriteBuffer & buf) const
String formatASTStateAware(IAST & ast, IAST::FormatState & state)
{
WriteBufferFromOwnString buf;
IAST::FormatSettings settings(buf, true, false);
ast.formatImpl(settings, state, IAST::FormatStateStacked());
return buf.str();
}
void ColumnDescription::writeText(WriteBuffer & buf, IAST::FormatState & state, bool include_comment) const
{
/// NOTE: Serialization format is insane.
@ -126,20 +134,21 @@ void ColumnDescription::writeText(WriteBuffer & buf) const
writeChar('\t', buf);
DB::writeText(DB::toString(default_desc.kind), buf);
writeChar('\t', buf);
writeEscapedString(queryToString(default_desc.expression), buf);
writeEscapedString(formatASTStateAware(*default_desc.expression, state), buf);
}
if (!comment.empty())
if (!comment.empty() && include_comment)
{
writeChar('\t', buf);
DB::writeText("COMMENT ", buf);
writeEscapedString(queryToString(ASTLiteral(Field(comment))), buf);
auto ast = ASTLiteral(Field(comment));
writeEscapedString(formatASTStateAware(ast, state), buf);
}
if (codec)
{
writeChar('\t', buf);
writeEscapedString(queryToString(codec), buf);
writeEscapedString(formatASTStateAware(*codec, state), buf);
}
if (!settings.empty())
@ -150,21 +159,21 @@ void ColumnDescription::writeText(WriteBuffer & buf) const
ASTSetQuery ast;
ast.is_standalone = false;
ast.changes = settings;
writeEscapedString(queryToString(ast), buf);
writeEscapedString(formatASTStateAware(ast, state), buf);
DB::writeText(")", buf);
}
if (!statistics.empty())
{
writeChar('\t', buf);
writeEscapedString(queryToString(statistics.getAST()), buf);
writeEscapedString(formatASTStateAware(*statistics.getAST(), state), buf);
}
if (ttl)
{
writeChar('\t', buf);
DB::writeText("TTL ", buf);
writeEscapedString(queryToString(ttl), buf);
writeEscapedString(formatASTStateAware(*ttl, state), buf);
}
writeChar('\n', buf);
@ -209,11 +218,7 @@ void ColumnDescription::readText(ReadBuffer & buf)
settings = col_ast->settings->as<ASTSetQuery &>().changes;
if (col_ast->statistics_desc)
{
statistics = ColumnStatisticsDescription::fromColumnDeclaration(*col_ast, type);
/// every column has name `x` here, so we have to set the name manually.
statistics.column_name = name;
}
}
else
throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse column description");
@ -895,16 +900,17 @@ void ColumnsDescription::resetColumnTTLs()
}
String ColumnsDescription::toString() const
String ColumnsDescription::toString(bool include_comments) const
{
WriteBufferFromOwnString buf;
IAST::FormatState ast_format_state;
writeCString("columns format version: 1\n", buf);
DB::writeText(columns.size(), buf);
writeCString(" columns:\n", buf);
for (const ColumnDescription & column : columns)
column.writeText(buf);
column.writeText(buf, ast_format_state, include_comments);
return buf.str();
}
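
The key detail above is that one `IAST::FormatState` is created per serialization and threaded through every column, so state-dependent formatting (such as tracking already-printed aliased subtrees) stays consistent across the whole `ColumnsDescription`. A sketch of the reuse pattern using only the calls visible in the hunk (assumes ClickHouse's IO and parser headers):

```cpp
// Hedged sketch: several fragments share one FormatState, as
// ColumnsDescription::toString now does for every column it writes.
String serializeFragments(const std::vector<ASTPtr> & fragments)
{
    WriteBufferFromOwnString buf;
    IAST::FormatState state;  // shared across all fragments on purpose
    for (const auto & ast : fragments)
    {
        IAST::FormatSettings settings(buf, /*one_line=*/ true, /*hilite=*/ false);
        ast->formatImpl(settings, state, IAST::FormatStateStacked());
        writeChar('\n', buf);
    }
    return buf.str();
}
```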

View File

@ -104,7 +104,7 @@ struct ColumnDescription
bool operator==(const ColumnDescription & other) const;
bool operator!=(const ColumnDescription & other) const { return !(*this == other); }
void writeText(WriteBuffer & buf) const;
void writeText(WriteBuffer & buf, IAST::FormatState & state, bool include_comment) const;
void readText(ReadBuffer & buf);
};
@ -137,7 +137,7 @@ public:
/// NOTE Must correspond with Nested::flatten function.
void flattenNested(); /// TODO: remove, insert already flattened Nested columns.
bool operator==(const ColumnsDescription & other) const { return columns == other.columns; }
bool operator==(const ColumnsDescription & other) const { return toString(false) == other.toString(false); }
bool operator!=(const ColumnsDescription & other) const { return !(*this == other); }
auto begin() const { return columns.begin(); }
@ -221,7 +221,7 @@ public:
/// Does column has non default specified compression codec
bool hasCompressionCodec(const String & column_name) const;
String toString() const;
String toString(bool include_comments = true) const;
static ColumnsDescription parse(const String & str);
size_t size() const
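
With `operator==` now comparing `toString(false)`, comment-only differences no longer make two descriptions unequal, presumably so that purely cosmetic metadata cannot masquerade as a schema change. A hedged usage sketch (assuming the usual `ColumnDescription` constructor and `add` API):

```cpp
// Hedged sketch: comments do not participate in equality anymore.
ColumnsDescription a;
a.add(ColumnDescription("x", std::make_shared<DataTypeUInt64>()));

ColumnsDescription b;
ColumnDescription col("x", std::make_shared<DataTypeUInt64>());
col.comment = "only a human-readable note";
b.add(col);

chassert(a == b);  // both serialize identically with include_comments = false
```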

View File

@ -444,8 +444,8 @@ StorageHive::StorageHive(
storage_metadata.setComment(comment_);
storage_metadata.partition_key = KeyDescription::getKeyFromAST(partition_by_ast, storage_metadata.columns, getContext());
setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, getContext()));
setInMemoryMetadata(storage_metadata);
setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), getContext()));
}
void StorageHive::lazyInitialize()

View File

@ -807,7 +807,7 @@ MergeTreeDataPartBuilder IMergeTreeDataPart::getProjectionPartBuilder(const Stri
const char * projection_extension = is_temp_projection ? ".tmp_proj" : ".proj";
auto projection_storage = getDataPartStorage().getProjection(projection_name + projection_extension, !is_temp_projection);
MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage);
return builder.withPartInfo({"all", 0, 0, 0}).withParentPart(this);
return builder.withPartInfo(MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION).withParentPart(this);
}
void IMergeTreeDataPart::addProjectionPart(
@ -1334,17 +1334,6 @@ void IMergeTreeDataPart::loadRowsCount()
auto buf = metadata_manager->read("count.txt");
readIntText(rows_count, *buf);
assertEOF(*buf);
if (!index_granularity.empty() && rows_count < index_granularity.getTotalRows() && index_granularity_info.fixed_index_granularity)
{
/// Adjust last granule size to match the number of rows in the part in case of fixed index_granularity.
index_granularity.popMark();
index_granularity.appendMark(rows_count % index_granularity_info.fixed_index_granularity);
if (rows_count != index_granularity.getTotalRows())
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Index granularity total rows in part {} does not match rows_count: {}, instead of {}",
name, index_granularity.getTotalRows(), rows_count);
}
};
if (index_granularity.empty())

View File

@ -6,10 +6,18 @@
#include <Common/CurrentThread.h>
#include <Common/MemoryTracker.h>
#include <Common/logger_useful.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
const MergeTreePartInfo MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION = {"all", 0, 0, 0};
MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMutatedPartPtr future_part, const ContextPtr & context)
: table_id{table_id_}
, partition_id{future_part->part_info.partition_id}
@ -21,8 +29,23 @@ MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMuta
, merge_type{future_part->merge_type}
, merge_algorithm{MergeAlgorithm::Undecided}
{
auto format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
if (result_part_name != result_part_info.getPartNameV1())
format_version = MERGE_TREE_DATA_OLD_FORMAT_VERSION;
/// FIXME why do we need a merge list element for projection parts at all?
bool is_fake_projection_part = future_part->part_info == FAKE_RESULT_PART_FOR_PROJECTION;
size_t normal_parts_count = 0;
for (const auto & source_part : future_part->parts)
{
if (!is_fake_projection_part && !source_part->getParentPart())
{
++normal_parts_count;
if (!result_part_info.contains(MergeTreePartInfo::fromPartName(source_part->name, format_version)))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Source part {} is not covered by result part {}", source_part->name, result_part_info.getPartNameV1());
}
source_part_names.emplace_back(source_part->name);
source_part_paths.emplace_back(source_part->getDataPartStorage().getFullPath());
@ -35,13 +58,17 @@ MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMuta
if (!future_part->parts.empty())
{
source_data_version = future_part->parts[0]->info.getDataVersion();
is_mutation = (result_part_info.getDataVersion() != source_data_version);
is_mutation = (result_part_info.level == future_part->parts[0]->info.level) && !is_fake_projection_part;
WriteBufferFromString out(partition);
const auto & part = future_part->parts[0];
part->partition.serializeText(part->storage, out, {});
}
if (!is_fake_projection_part && is_mutation && normal_parts_count != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got {} source parts for mutation {}: {}", future_part->parts.size(),
result_part_info.getPartNameV1(), fmt::join(source_part_names, ", "));
thread_group = ThreadGroup::createForBackgroundProcess(context);
}
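
The new constructor logic validates merge entries: every real source part must be covered by the result part, mutations must have exactly one normal source part, and the `all_0_0_0` sentinel exempts projection merges. A small illustration of the containment relation being asserted (part names invented for the example; assumes `MergeTreePartInfo`'s usual block-range semantics):

```cpp
// Hedged illustration of the covering check above.
auto version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
auto result  = MergeTreePartInfo::fromPartName("all_1_5_2", version);

// Block range 3..4 lies inside 1..5, so this source part is covered:
chassert(result.contains(MergeTreePartInfo::fromPartName("all_3_4_1", version)));

// Block 6 is outside 1..5 -- with the new check this would now throw
// LOGICAL_ERROR instead of producing a malformed merge entry:
chassert(!result.contains(MergeTreePartInfo::fromPartName("all_6_6_0", version)));
```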

View File

@ -66,6 +66,8 @@ struct Settings;
struct MergeListElement : boost::noncopyable
{
static const MergeTreePartInfo FAKE_RESULT_PART_FOR_PROJECTION;
const StorageID table_id;
std::string partition_id;
std::string partition;

View File

@ -0,0 +1,95 @@
#include <Storages/MergeTree/MergeProjectionPartsTask.h>
#include <Common/TransactionID.h>
#include <Storages/MergeTree/MergeList.h>
namespace DB
{
bool MergeProjectionPartsTask::executeStep()
{
auto & current_level_parts = level_parts[current_level];
auto & next_level_parts = level_parts[next_level];
MergeTreeData::MutableDataPartsVector selected_parts;
while (selected_parts.size() < max_parts_to_merge_in_one_level && !current_level_parts.empty())
{
selected_parts.push_back(std::move(current_level_parts.back()));
current_level_parts.pop_back();
}
if (selected_parts.empty())
{
if (next_level_parts.empty())
{
LOG_WARNING(log, "There are no projection parts merged");
/// Task is finished
return false;
}
current_level = next_level;
++next_level;
}
else if (selected_parts.size() == 1)
{
if (next_level_parts.empty())
{
LOG_DEBUG(log, "Merged a projection part in level {}", current_level);
selected_parts[0]->renameTo(projection.name + ".proj", true);
selected_parts[0]->setName(projection.name);
selected_parts[0]->is_temp = false;
new_data_part->addProjectionPart(name, std::move(selected_parts[0]));
/// Task is finished
return false;
}
else
{
LOG_DEBUG(log, "Forwarded part {} in level {} to next level", selected_parts[0]->name, current_level);
next_level_parts.push_back(std::move(selected_parts[0]));
}
}
else if (selected_parts.size() > 1)
{
// Generate a unique part name
++block_num;
auto projection_future_part = std::make_shared<FutureMergedMutatedPart>();
MergeTreeData::DataPartsVector const_selected_parts(
std::make_move_iterator(selected_parts.begin()), std::make_move_iterator(selected_parts.end()));
projection_future_part->assign(std::move(const_selected_parts));
projection_future_part->name = fmt::format("{}_{}", projection.name, ++block_num);
projection_future_part->part_info = {"all", 0, 0, 0};
MergeTreeData::MergingParams projection_merging_params;
projection_merging_params.mode = MergeTreeData::MergingParams::Ordinary;
if (projection.type == ProjectionDescription::Type::Aggregate)
projection_merging_params.mode = MergeTreeData::MergingParams::Aggregating;
LOG_DEBUG(log, "Merged {} parts in level {} to {}", selected_parts.size(), current_level, projection_future_part->name);
auto tmp_part_merge_task = mutator->mergePartsToTemporaryPart(
projection_future_part,
projection.metadata,
merge_entry,
std::make_unique<MergeListElement>((*merge_entry)->table_id, projection_future_part, context),
*table_lock_holder,
time_of_merge,
context,
space_reservation,
false, // TODO Do we need deduplicate for projections
{},
false, // no cleanup
projection_merging_params,
NO_TRANSACTION_PTR,
/* need_prefix */ true,
new_data_part.get(),
".tmp_proj");
next_level_parts.push_back(executeHere(tmp_part_merge_task));
next_level_parts.back()->is_temp = true;
}
/// Need execute again
return true;
}
}
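
The task factored out above merges projection parts in levels: up to `max_parts_to_merge_in_one_level` (10) parts are merged per step, results land on the next level, and the loop climbs levels until a single part remains. A self-contained toy of that control flow, with strings standing in for parts and concatenation for the actual merge:

```cpp
#include <algorithm>
#include <cassert>
#include <map>
#include <string>
#include <vector>

// Hedged toy of the level-by-level loop in MergeProjectionPartsTask.
std::string mergeByLevels(std::vector<std::string> parts, std::size_t max_per_level = 10)
{
    if (parts.empty())
        return {};
    std::map<std::size_t, std::vector<std::string>> level_parts;
    level_parts[0] = std::move(parts);
    std::size_t current = 0, next = 1;
    while (true)
    {
        auto & cur = level_parts[current];
        auto & nxt = level_parts[next];
        if (cur.empty())
        {
            if (nxt.size() == 1)
                return nxt.front();   // one part left: the merged projection
            current = next;           // otherwise climb to the next level
            ++next;
            continue;
        }
        if (cur.size() == 1 && nxt.empty())
            return cur.front();       // lone part, nothing to merge with
        // Take up to max_per_level parts; a batch of one is just a forward.
        std::size_t n = std::min(max_per_level, cur.size());
        std::string merged;
        for (std::size_t i = 0; i < n; ++i)
        {
            merged += cur.back();     // stands in for mergePartsToTemporaryPart
            cur.pop_back();
        }
        nxt.push_back(std::move(merged));
    }
}

int main()
{
    assert(mergeByLevels({"a", "b", "c"}, 2) == "acb");  // "cb", then "a" + "cb"
    return 0;
}
```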

View File

@ -0,0 +1,84 @@
#pragma once
#include <Interpreters/StorageID.h>
#include <Storages/MergeTree/IExecutableTask.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
#include <Storages/MergeTree/MergeProgress.h>
#include <Storages/MergeTree/FutureMergedMutatedPart.h>
#include <Storages/ProjectionsDescription.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
class MergeProjectionPartsTask : public IExecutableTask
{
public:
MergeProjectionPartsTask(
String name_,
MergeTreeData::MutableDataPartsVector && parts_,
const ProjectionDescription & projection_,
size_t & block_num_,
ContextPtr context_,
TableLockHolder * table_lock_holder_,
MergeTreeDataMergerMutator * mutator_,
MergeListEntry * merge_entry_,
time_t time_of_merge_,
MergeTreeData::MutableDataPartPtr new_data_part_,
ReservationSharedPtr space_reservation_)
: name(std::move(name_))
, parts(std::move(parts_))
, projection(projection_)
, block_num(block_num_)
, context(context_)
, table_lock_holder(table_lock_holder_)
, mutator(mutator_)
, merge_entry(merge_entry_)
, time_of_merge(time_of_merge_)
, new_data_part(new_data_part_)
, space_reservation(space_reservation_)
, log(getLogger("MergeProjectionPartsTask"))
{
LOG_DEBUG(log, "Selected {} projection_parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
level_parts[current_level] = std::move(parts);
}
void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
bool executeStep() override;
private:
String name;
MergeTreeData::MutableDataPartsVector parts;
const ProjectionDescription & projection;
size_t & block_num;
ContextPtr context;
TableLockHolder * table_lock_holder;
MergeTreeDataMergerMutator * mutator;
MergeListEntry * merge_entry;
time_t time_of_merge;
MergeTreeData::MutableDataPartPtr new_data_part;
ReservationSharedPtr space_reservation;
LoggerPtr log;
std::map<size_t, MergeTreeData::MutableDataPartsVector> level_parts;
size_t current_level = 0;
size_t next_level = 1;
/// TODO(nikitamikhaylov): make this constant a setting
static constexpr size_t max_parts_to_merge_in_one_level = 10;
};
}

View File

@ -21,6 +21,8 @@
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/MergeTree/FutureMergedMutatedPart.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
#include <Storages/MergeTree/MergeTreeDataWriter.h>
#include <Storages/MergeTree/MergeProjectionPartsTask.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Transforms/MaterializingTransform.h>
#include <Processors/Transforms/FilterTransform.h>
@ -63,6 +65,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
}
static ColumnsStatistics getStatisticsForColumns(
const NamesAndTypesList & columns_to_read,
const StorageMetadataPtr & metadata_snapshot)
@ -75,7 +78,7 @@ static ColumnsStatistics getStatisticsForColumns(
const auto * desc = all_columns.tryGet(column.name);
if (desc && !desc->statistics.empty())
{
auto statistics = MergeTreeStatisticsFactory::instance().get(desc->statistics);
auto statistics = MergeTreeStatisticsFactory::instance().get(*desc);
all_statistics.push_back(std::move(statistics));
}
}
@ -155,6 +158,13 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu
}
}
for (const auto * projection : global_ctx->projections_to_rebuild)
{
Names projection_columns_vec = projection->getRequiredColumns();
std::copy(projection_columns_vec.cbegin(), projection_columns_vec.cend(),
std::inserter(key_columns, key_columns.end()));
}
/// TODO: also force "summing" and "aggregating" columns to make Horizontal merge only for such columns
for (const auto & column : global_ctx->storage_columns)
@ -254,6 +264,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
extendObjectColumns(global_ctx->storage_columns, object_columns, false);
global_ctx->storage_snapshot = std::make_shared<StorageSnapshot>(*global_ctx->data, global_ctx->metadata_snapshot, std::move(object_columns));
prepareProjectionsToMergeAndRebuild();
extractMergingAndGatheringColumns();
global_ctx->new_data_part->uuid = global_ctx->future_part->uuid;
@ -517,6 +529,148 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute()
}
void MergeTask::ExecuteAndFinalizeHorizontalPart::prepareProjectionsToMergeAndRebuild() const
{
const auto mode = global_ctx->data->getSettings()->deduplicate_merge_projection_mode;
/// Under throw mode, we still drop projections for backward compatibility, since some
/// users may have had projections before this change.
if (global_ctx->data->merging_params.mode != MergeTreeData::MergingParams::Ordinary
&& (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::DROP))
return;
/// These merging modes may or may not reduce the number of rows. It's not known until the horizontal stage is finished.
const bool merge_may_reduce_rows =
global_ctx->cleanup ||
global_ctx->deduplicate ||
ctx->merging_params.mode == MergeTreeData::MergingParams::Collapsing ||
ctx->merging_params.mode == MergeTreeData::MergingParams::Replacing ||
ctx->merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing;
const auto & projections = global_ctx->metadata_snapshot->getProjections();
for (const auto & projection : projections)
{
if (merge_may_reduce_rows)
{
global_ctx->projections_to_rebuild.push_back(&projection);
continue;
}
MergeTreeData::DataPartsVector projection_parts;
for (const auto & part : global_ctx->future_part->parts)
{
auto it = part->getProjectionParts().find(projection.name);
if (it != part->getProjectionParts().end() && !it->second->is_broken)
projection_parts.push_back(it->second);
}
if (projection_parts.size() == global_ctx->future_part->parts.size())
{
global_ctx->projections_to_merge.push_back(&projection);
global_ctx->projections_to_merge_parts[projection.name].assign(projection_parts.begin(), projection_parts.end());
}
else
{
chassert(projection_parts.size() < global_ctx->future_part->parts.size());
LOG_DEBUG(ctx->log, "Projection {} is not merged because some parts don't have it", projection.name);
continue;
}
}
const auto & settings = global_ctx->context->getSettingsRef();
for (const auto * projection : global_ctx->projections_to_rebuild)
ctx->projection_squashes.emplace_back(projection->sample_block.cloneEmpty(),
settings.min_insert_block_size_rows, settings.min_insert_block_size_bytes);
}
void MergeTask::ExecuteAndFinalizeHorizontalPart::calculateProjections(const Block & block) const
{
for (size_t i = 0, size = global_ctx->projections_to_rebuild.size(); i < size; ++i)
{
const auto & projection = *global_ctx->projections_to_rebuild[i];
Block block_to_squash = projection.calculate(block, global_ctx->context);
auto & projection_squash_plan = ctx->projection_squashes[i];
projection_squash_plan.setHeader(block_to_squash.cloneEmpty());
Chunk squashed_chunk = Squashing::squash(projection_squash_plan.add({block_to_squash.getColumns(), block_to_squash.rows()}));
if (squashed_chunk)
{
auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns());
auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart(
*global_ctx->data, ctx->log, result, projection, global_ctx->new_data_part.get(), ++ctx->projection_block_num);
tmp_part.finalize();
tmp_part.part->getDataPartStorage().commitTransaction();
ctx->projection_parts[projection.name].emplace_back(std::move(tmp_part.part));
}
}
}
void MergeTask::ExecuteAndFinalizeHorizontalPart::finalizeProjections() const
{
for (size_t i = 0, size = global_ctx->projections_to_rebuild.size(); i < size; ++i)
{
const auto & projection = *global_ctx->projections_to_rebuild[i];
auto & projection_squash_plan = ctx->projection_squashes[i];
auto squashed_chunk = Squashing::squash(projection_squash_plan.flush());
if (squashed_chunk)
{
auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns());
auto temp_part = MergeTreeDataWriter::writeTempProjectionPart(
*global_ctx->data, ctx->log, result, projection, global_ctx->new_data_part.get(), ++ctx->projection_block_num);
temp_part.finalize();
temp_part.part->getDataPartStorage().commitTransaction();
ctx->projection_parts[projection.name].emplace_back(std::move(temp_part.part));
}
}
ctx->projection_parts_iterator = std::make_move_iterator(ctx->projection_parts.begin());
if (ctx->projection_parts_iterator != std::make_move_iterator(ctx->projection_parts.end()))
constructTaskForProjectionPartsMerge();
}
void MergeTask::ExecuteAndFinalizeHorizontalPart::constructTaskForProjectionPartsMerge() const
{
auto && [name, parts] = *ctx->projection_parts_iterator;
const auto & projection = global_ctx->metadata_snapshot->projections.get(name);
ctx->merge_projection_parts_task_ptr = std::make_unique<MergeProjectionPartsTask>
(
name,
std::move(parts),
projection,
ctx->projection_block_num,
global_ctx->context,
global_ctx->holder,
global_ctx->mutator,
global_ctx->merge_entry,
global_ctx->time_of_merge,
global_ctx->new_data_part,
global_ctx->space_reservation
);
}
bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeMergeProjections() // NOLINT
{
/// If there are no projections, no task was constructed
if (!ctx->merge_projection_parts_task_ptr)
return false;
if (ctx->merge_projection_parts_task_ptr->executeStep())
return true;
++ctx->projection_parts_iterator;
if (ctx->projection_parts_iterator == std::make_move_iterator(ctx->projection_parts.end()))
return false;
constructTaskForProjectionPartsMerge();
return true;
}
bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
{
Stopwatch watch(CLOCK_MONOTONIC_COARSE);
@ -535,6 +689,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
global_ctx->rows_written += block.rows();
const_cast<MergedBlockOutputStream &>(*global_ctx->to).write(block);
calculateProjections(block);
UInt64 result_rows = 0;
UInt64 result_bytes = 0;
global_ctx->merged_pipeline.tryGetResultRowsAndBytes(result_rows, result_bytes);
@ -558,8 +714,10 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
return true;
}
void MergeTask::ExecuteAndFinalizeHorizontalPart::finalize() const
{
finalizeProjections();
global_ctx->merging_executor.reset();
global_ctx->merged_pipeline.reset();
@ -847,35 +1005,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
ReadableSize(global_ctx->merge_list_element_ptr->bytes_read_uncompressed / elapsed_seconds));
}
const auto mode = global_ctx->data->getSettings()->deduplicate_merge_projection_mode;
/// Under throw mode, we still choose to drop projections due to backward compatibility since some
/// users might have projections before this change.
if (global_ctx->data->merging_params.mode != MergeTreeData::MergingParams::Ordinary
&& (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::DROP))
for (const auto & projection : global_ctx->projections_to_merge)
{
ctx->projections_iterator = ctx->tasks_for_projections.begin();
return false;
}
const auto & projections = global_ctx->metadata_snapshot->getProjections();
for (const auto & projection : projections)
{
MergeTreeData::DataPartsVector projection_parts;
for (const auto & part : global_ctx->future_part->parts)
{
auto actual_projection_parts = part->getProjectionParts();
auto it = actual_projection_parts.find(projection.name);
if (it != actual_projection_parts.end() && !it->second->is_broken)
projection_parts.push_back(it->second);
}
if (projection_parts.size() < global_ctx->future_part->parts.size())
{
LOG_DEBUG(ctx->log, "Projection {} is not merged because some parts don't have it", projection.name);
continue;
}
MergeTreeData::DataPartsVector projection_parts = global_ctx->projections_to_merge_parts[projection->name];
LOG_DEBUG(
ctx->log,
"Selected {} projection_parts from {} to {}",
@ -885,24 +1017,25 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
auto projection_future_part = std::make_shared<FutureMergedMutatedPart>();
projection_future_part->assign(std::move(projection_parts));
projection_future_part->name = projection.name;
projection_future_part->name = projection->name;
// TODO (ab): path in future_part is only for merge process introspection, which is not available for merges of projection parts.
// Let's comment this out to avoid code inconsistency and add it back after we implement projection merge introspection.
// projection_future_part->path = global_ctx->future_part->path + "/" + projection.name + ".proj/";
projection_future_part->part_info = {"all", 0, 0, 0};
projection_future_part->part_info = MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION;
MergeTreeData::MergingParams projection_merging_params;
projection_merging_params.mode = MergeTreeData::MergingParams::Ordinary;
if (projection.type == ProjectionDescription::Type::Aggregate)
if (projection->type == ProjectionDescription::Type::Aggregate)
projection_merging_params.mode = MergeTreeData::MergingParams::Aggregating;
ctx->tasks_for_projections.emplace_back(std::make_shared<MergeTask>(
projection_future_part,
projection.metadata,
projection->metadata,
global_ctx->merge_entry,
std::make_unique<MergeListElement>((*global_ctx->merge_entry)->table_id, projection_future_part, global_ctx->context),
global_ctx->time_of_merge,
global_ctx->context,
*global_ctx->holder,
global_ctx->space_reservation,
global_ctx->deduplicate,
global_ctx->deduplicate_by_columns,
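
Taken together, the MergeTask changes split projections into two buckets up front: anything that can drop rows (cleanup, deduplication, Collapsing/Replacing/VersionedCollapsing) forces a rebuild from the merged stream, an intact projection present in all source parts is merged part-wise, and one missing from some part is dropped with a log message. A condensed, hypothetical rendering of that decision:

```cpp
#include <cstddef>

// Hedged condensation of prepareProjectionsToMergeAndRebuild; the enum and
// struct here are hypothetical, not types from the diff.
enum class ProjectionAction { Rebuild, Merge, Skip };

struct MergeFacts
{
    bool cleanup = false;
    bool deduplicate = false;
    bool row_reducing_mode = false;         // Collapsing / Replacing / VersionedCollapsing
    std::size_t source_parts = 0;
    std::size_t parts_with_projection = 0;  // non-broken projection parts
};

ProjectionAction decide(const MergeFacts & f)
{
    if (f.cleanup || f.deduplicate || f.row_reducing_mode)
        return ProjectionAction::Rebuild;   // row count may shrink: recompute
    if (f.parts_with_projection == f.source_parts)
        return ProjectionAction::Merge;     // merge existing projection parts
    return ProjectionAction::Skip;          // some part lacks it: not merged
}
```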

View File

@ -9,6 +9,7 @@
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Interpreters/Squashing.h>
#include <Interpreters/TemporaryDataOnDisk.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
@ -72,6 +73,7 @@ public:
std::unique_ptr<MergeListElement> projection_merge_list_element_,
time_t time_of_merge_,
ContextPtr context_,
TableLockHolder & holder,
ReservationSharedPtr space_reservation_,
bool deduplicate_,
Names deduplicate_by_columns_,
@ -96,6 +98,7 @@ public:
= global_ctx->projection_merge_list_element ? global_ctx->projection_merge_list_element.get() : (*global_ctx->merge_entry)->ptr();
global_ctx->time_of_merge = std::move(time_of_merge_);
global_ctx->context = std::move(context_);
global_ctx->holder = &holder;
global_ctx->space_reservation = std::move(space_reservation_);
global_ctx->deduplicate = std::move(deduplicate_);
global_ctx->deduplicate_by_columns = std::move(deduplicate_by_columns_);
@ -151,6 +154,7 @@ private:
/// Proper initialization is responsibility of the author
struct GlobalRuntimeContext : public IStageRuntimeContext
{
TableLockHolder * holder;
MergeList::Entry * merge_entry{nullptr};
/// If not null, use this instead of the global MergeList::Entry. This is for merging projections.
std::unique_ptr<MergeListElement> projection_merge_list_element;
@ -181,6 +185,10 @@ private:
MergeAlgorithm chosen_merge_algorithm{MergeAlgorithm::Undecided};
std::vector<ProjectionDescriptionRawPtr> projections_to_rebuild{};
std::vector<ProjectionDescriptionRawPtr> projections_to_merge{};
std::map<String, MergeTreeData::DataPartsVector> projections_to_merge_parts{};
std::unique_ptr<MergeStageProgress> horizontal_stage_progress{nullptr};
std::unique_ptr<MergeStageProgress> column_progress{nullptr};
@ -228,6 +236,14 @@ private:
std::unique_ptr<WriteBuffer> rows_sources_write_buf{nullptr};
std::optional<ColumnSizeEstimator> column_sizes{};
/// For projections to rebuild
using ProjectionNameToItsBlocks = std::map<String, MergeTreeData::MutableDataPartsVector>;
ProjectionNameToItsBlocks projection_parts;
std::move_iterator<ProjectionNameToItsBlocks::iterator> projection_parts_iterator;
std::vector<Squashing> projection_squashes;
size_t projection_block_num = 0;
ExecutableTaskPtr merge_projection_parts_task_ptr;
size_t initial_reservation{0};
bool read_with_direct_io{false};
@ -257,16 +273,23 @@ private:
void finalize() const;
/// NOTE: Using pointer-to-member instead of std::function and lambda makes stacktraces much more concise and readable
using ExecuteAndFinalizeHorizontalPartSubtasks = std::array<bool(ExecuteAndFinalizeHorizontalPart::*)(), 2>;
using ExecuteAndFinalizeHorizontalPartSubtasks = std::array<bool(ExecuteAndFinalizeHorizontalPart::*)(), 3>;
const ExecuteAndFinalizeHorizontalPartSubtasks subtasks
{
&ExecuteAndFinalizeHorizontalPart::prepare,
&ExecuteAndFinalizeHorizontalPart::executeImpl
&ExecuteAndFinalizeHorizontalPart::executeImpl,
&ExecuteAndFinalizeHorizontalPart::executeMergeProjections
};
ExecuteAndFinalizeHorizontalPartSubtasks::const_iterator subtasks_iterator = subtasks.begin();
void prepareProjectionsToMergeAndRebuild() const;
void calculateProjections(const Block & block) const;
void finalizeProjections() const;
void constructTaskForProjectionPartsMerge() const;
bool executeMergeProjections();
MergeAlgorithm chooseMergeAlgorithm() const;
void createMergedStream();
void extractMergingAndGatheringColumns() const;

Some files were not shown because too many files have changed in this diff.