Decrease part size in 02700_s3_part_INT_MAX
Seems that the IO is too slow [1].

  [1]: https://s3.amazonaws.com/clickhouse-test-reports/47693/6d869e3b0d3e439260eb54c32e3fa99acecae063/stateless_tests_flaky_check__asan_.html

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
parent 91713325a2
commit 8dcee4c482
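Why 2 GiB is still enough to reproduce the original crash: each row the test writes is 1024 'a' characters plus a TSV newline, so even the raw payload (2^31 bytes) is already one byte past INT_MAX (2^31 - 1). A minimal sanity check of the arithmetic, runnable with clickhouse-local (illustrative only, not part of the test):

    SELECT
        toInt64(pow(2, 30) * 2 / 1024) AS rows,            -- 2097152, the new numbers() argument
        rows * 1025                    AS tsv_bytes,       -- 2149580800: 1024 bytes of 'a' plus '\n' per row
        2147483647                     AS int_max,         -- 2^31 - 1
        tsv_bytes > int_max            AS exceeds_int_max; -- 1: the uploaded part still overflows a 32-bit size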
tests/queries/0_stateless/02700_s3_part_INT_MAX.reference
@@ -1 +1 @@
-3145728
+2097152
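The reference value is the row count that the final SELECT count() in the test must print, so it has to track the numbers() argument changed below; a quick check of both values:

    SELECT toUInt64(pow(2, 30) * 3 / 1024) AS old_count,  -- 3145728 (3 GiB of 1 KiB rows)
           toUInt64(pow(2, 30) * 2 / 1024) AS new_count;  -- 2097152 (2 GiB of 1 KiB rows)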
tests/queries/0_stateless/02700_s3_part_INT_MAX.sql
@@ -2,8 +2,7 @@
 
 -- Regression test for crash in case of part exceeds INT_MAX
 INSERT INTO FUNCTION s3('http://localhost:11111/test/test_INT_MAX.tsv', '', '', 'TSV')
--- NOTE: 2GiB is enough, but let's use 3GiB, just in case.
-SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 3) / 1024)
+SELECT repeat('a', 1024) FROM numbers((pow(2, 30) * 2) / 1024)
 SETTINGS s3_truncate_on_insert = 1, s3_max_single_part_upload_size = '10Gi';
 
 SELECT count() FROM s3('http://localhost:11111/test/test_INT_MAX.tsv');
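For context on the unchanged SETTINGS line: s3_truncate_on_insert = 1 lets the INSERT overwrite an object left over from a previous run, and raising s3_max_single_part_upload_size to '10Gi' (far above its much smaller default) keeps the write from falling back to multipart upload, so the whole ~2 GiB object is uploaded as a single part, exercising the "part exceeds INT_MAX" case the test guards against. The effective limit on a given server can be inspected via system.settings:

    SELECT name, value
    FROM system.settings
    WHERE name = 's3_max_single_part_upload_size';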