Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-24 08:32:02 +00:00
Merge pull request #23050 from ClickHouse/remove-strange-code-fix-19283
Remove wrong code, fix #19283
Commit: b029e9e4f6
@@ -2252,30 +2252,27 @@ private:
            return;

        processed_rows += block.rows();

        /// Even if all blocks are empty, we still need to initialize the output stream to write empty resultset.
        initBlockOutputStream(block);

        /// The header block containing zero rows was used to initialize
        /// block_out_stream, do not output it.
        /// Also do not output too much data if we're fuzzing.
-        if (block.rows() != 0
-            && (query_fuzzer_runs == 0 || processed_rows < 100))
-        {
+        if (block.rows() == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100))
+            return;

        if (need_render_progress)
            clearProgress();

        block_out_stream->write(block);
        written_first_block = true;
-        }
-
-        bool clear_progress = false;
-        if (need_render_progress)
-            clear_progress = std_out.offset() > 0;
-
-        if (clear_progress)
-            clearProgress();

        /// Received data block is immediately displayed to the user.
        block_out_stream->flush();

        /// Restore progress bar after data block.
-        if (clear_progress)
+        if (need_render_progress)
            writeProgress();
    }
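For context on the hunk above: the old code wrote the block inside a nested if (block.rows() != 0 && ...) branch and tracked progress clearing through a separate clear_progress flag derived from std_out.offset(); the patch replaces both with a single guard clause that returns early, so the write/flush/progress-restore path below it runs unconditionally once the guard passes. Below is a minimal stand-alone sketch of that guard condition only; Block and shouldOutput here are illustrative stand-ins, not ClickHouse's actual types or functions.

    // Stand-alone sketch (hypothetical names): the guard that decides whether a
    // received block is written to the output stream after this change.
    #include <cstddef>
    #include <iostream>

    struct Block { std::size_t rows = 0; };   // stand-in for the real Block class

    // Skip header-only blocks, and cap output at 100 processed rows when fuzzing.
    bool shouldOutput(const Block & block, std::size_t query_fuzzer_runs, std::size_t processed_rows)
    {
        return !(block.rows == 0 || (query_fuzzer_runs != 0 && processed_rows >= 100));
    }

    int main()
    {
        std::cout << shouldOutput(Block{0}, 0, 5) << '\n';    // 0: header block is never written
        std::cout << shouldOutput(Block{8}, 0, 500) << '\n';  // 1: normal run, always written
        std::cout << shouldOutput(Block{8}, 1, 500) << '\n';  // 0: fuzzing and already past 100 rows
    }

The write and flush calls themselves are unchanged in the diff; only the decision around them and the progress handling were simplified.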
@@ -30,7 +30,6 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo
    client1.send('CREATE LIVE VIEW test.lv AS SELECT toStartOfDay(time) AS day, location, avg(temperature) FROM test.mt GROUP BY day, location ORDER BY day, location')
    client1.expect(prompt)
    client1.send('WATCH test.lv FORMAT CSVWithNames')
    client1.expect(r'_version')
    client2.send("INSERT INTO test.mt VALUES ('2019-01-01 00:00:00','New York',60),('2019-01-01 00:10:00','New York',70)")
    client2.expect(prompt)
    client1.expect(r'"2019-01-01 00:00:00","New York",65')
@ -1,20 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
$CLICKHOUSE_CLIENT -q "drop table if exists huge_strings"
|
||||
$CLICKHOUSE_CLIENT -q "create table huge_strings (n UInt64, l UInt64, s String, h UInt64) engine=MergeTree order by n"
|
||||
|
||||
for _ in {1..10}; do
|
||||
$CLICKHOUSE_CLIENT -q "select number, (rand() % 100*1000*1000) as l, repeat(randomString(l/1000/1000), 1000*1000) as s, cityHash64(s) from numbers(10) format Values" | $CLICKHOUSE_CLIENT -q "insert into huge_strings values" &
|
||||
$CLICKHOUSE_CLIENT -q "select number % 10, (rand() % 100) as l, randomString(l) as s, cityHash64(s) from numbers(100000)" | $CLICKHOUSE_CLIENT -q "insert into huge_strings format TSV" &
|
||||
done;
|
||||
wait
|
||||
|
||||
$CLICKHOUSE_CLIENT -q "select count() from huge_strings"
|
||||
$CLICKHOUSE_CLIENT -q "select sum(l = length(s)) from huge_strings"
|
||||
$CLICKHOUSE_CLIENT -q "select sum(h = cityHash64(s)) from huge_strings"
|
||||
|
||||
$CLICKHOUSE_CLIENT -q "drop table huge_strings"
|
tests/queries/0_stateless/01184_long_insert_values_huge_strings.sh (executable file, 22 lines added)
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "drop table if exists huge_strings"
+$CLICKHOUSE_CLIENT -q "create table huge_strings (n UInt64, l UInt64, s String, h UInt64) engine=MergeTree order by n"
+
+# Timeouts are increased, because test can be slow with sanitizers and parallel runs.
+
+for _ in {1..10}; do
+  $CLICKHOUSE_CLIENT --receive_timeout 100 --send_timeout 100 --connect_timeout 100 --query "select number, (rand() % 10*1000*1000) as l, repeat(randomString(l/1000/1000), 1000*1000) as s, cityHash64(s) from numbers(10) format Values" | $CLICKHOUSE_CLIENT --receive_timeout 100 --send_timeout 100 --connect_timeout 100 --query "insert into huge_strings values" &
+  $CLICKHOUSE_CLIENT --receive_timeout 100 --send_timeout 100 --connect_timeout 100 --query "select number % 10, (rand() % 10) as l, randomString(l) as s, cityHash64(s) from numbers(100000)" | $CLICKHOUSE_CLIENT --receive_timeout 100 --send_timeout 100 --connect_timeout 100 --query "insert into huge_strings format TSV" &
+done;
+wait
+
+$CLICKHOUSE_CLIENT -q "select count() from huge_strings"
+$CLICKHOUSE_CLIENT -q "select sum(l = length(s)) from huge_strings"
+$CLICKHOUSE_CLIENT -q "select sum(h = cityHash64(s)) from huge_strings"
+
+$CLICKHOUSE_CLIENT -q "drop table huge_strings"
@@ -25,6 +25,8 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo

    client1.send('DROP TABLE IF EXISTS test.lv')
    client1.expect(prompt)
    client1.send('DROP TABLE IF EXISTS test.lv_sums')
    client1.expect(prompt)
    client1.send('DROP TABLE IF EXISTS test.mt')
    client1.expect(prompt)
    client1.send('DROP TABLE IF EXISTS test.sums')
@@ -39,11 +41,9 @@ with client(name='client1>', log=log) as client1, client(name='client2>', log=lo
    client3.expect(prompt)

    client3.send("WATCH test.lv_sums FORMAT CSVWithNames")
    client3.expect('_version')

    client1.send('INSERT INTO test.sums WATCH test.lv')
    client1.expect(r'INSERT INTO')
    client1.expect(r'Progress')

    client3.expect('0,1.*\r\n')