import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1',
                             config_dir='configs',
                             main_configs=['configs/logs_config.xml'],
                             with_zookeeper=True,
                             tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
                             macros={"shard": 0, "replica": 1})
node2 = cluster.add_instance('node2',
                             config_dir='configs',
                             main_configs=['configs/logs_config.xml'],
                             with_zookeeper=True,
                             tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
                             macros={"shard": 0, "replica": 2})
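
# Both nodes mount two 40MB "jbod" disks and a 200MB "external" disk on tmpfs.
# The storage policies referenced below ('small_jbod_with_external',
# 'jbods_with_external', 'only_jbod2', 'jbod1_with_jbod2') are assumed to be
# defined in the XML under the 'configs' directory.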


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def get_random_string(length):
    # Uppercase letters and digits only, so the value can be embedded in SQL literals safely.
    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))


def get_used_disks_for_table(node, table_name):
    return node.query("select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format(table_name)).strip().split('\n')
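

# All TTL tests below share one convention: with positive=1 every row's TTL is
# already reachable, so the part should end up on the destination disk/volume;
# with positive=0 one row's TTL stays in the future, which pins the whole part
# to its original disk.
#
# A minimal polling sketch (hypothetical helper, not used by the tests below,
# which rely on fixed sleeps): wait until the table's parts sit on the expected
# set of disks, or give up after a timeout.
def wait_for_disks(node, table_name, expected_disks, timeout=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if set(get_used_disks_for_table(node, table_name)) == expected_disks:
            return True
        time.sleep(0.5)
    return False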


@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_disk_do_not_work", "MergeTree()", 0),
    ("replicated_mt_test_inserts_to_disk_do_not_work", "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')", 0),
    ("mt_test_inserts_to_disk_work", "MergeTree()", 1),
    ("replicated_mt_test_inserts_to_disk_work", "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')", 1),
])
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time() - 1 if i > 0 or positive else time.time() + 300)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
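

# The "moves" tests stage the TTL in the near future: time_1 expires after
# wait_expire_1 seconds and time_2 well after that. A thread sleeping for
# wait_expire_1 marks the first deadline; joining it plus half of wait_expire_2
# gives the background mover a window to relocate the expired part.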
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_to_disk_do_not_work", "MergeTree()", 0),
    ("replicated_mt_test_moves_to_disk_do_not_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')", 0),
    ("mt_test_moves_to_disk_work", "MergeTree()", 1),
    ("replicated_mt_test_moves_to_disk_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')", 1),
])
def test_moves_to_disk_work(started_cluster, name, engine, positive):
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 6
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
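

# Policy 'jbods_with_external' spreads parts across jbod1 and jbod2; with two
# partitions the inserts land on both disks, and once the TTL fires every part
# should move to the 'external' volume.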
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_volume_work", "MergeTree()"),
    ("replicated_mt_test_moves_to_volume_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')"),
])
def test_moves_to_volume_work(started_cluster, name, engine):
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for p in range(2):
            data = []  # 10MB in total
            for i in range(5):
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {'jbod1', 'jbod2'}

        wait_expire_1_thread.join()
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
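

# With SYSTEM STOP MOVES in effect, placement is decided at insert time only:
# already-expired rows go straight to the 'external' volume, while a part with
# an unexpired row stays on jbod1 and is never moved afterwards.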
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_inserts_to_volume_do_not_work", "MergeTree()", 0),
    ("replicated_mt_test_inserts_to_volume_do_not_work", "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')", 0),
    ("mt_test_inserts_to_volume_work", "MergeTree()", 1),
    ("replicated_mt_test_inserts_to_volume_work", "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')", 1),
])
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
    try:
        node1.query("""
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        node1.query("SYSTEM STOP MOVES {name}".format(name=name))

        for p in range(2):
            data = []  # 20MB in total
            for i in range(10):
                data.append((str(p), "'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time() - 1 if i > 0 or positive else time.time() + 300)))  # 1MB row

            node1.query("INSERT INTO {} (p1, s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
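

# Here the TTL destination (jbod2) is first filled by a temporary table, so the
# move cannot happen; once the temporary table is dropped, the background mover
# should eventually relocate the parts.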
@pytest.mark.parametrize("name,engine", [
    ("mt_test_moves_to_disk_eventually_work", "MergeTree()"),
    ("replicated_mt_test_moves_to_disk_eventually_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')"),
])
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
    try:
        name_temp = name + "_temp"
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))

        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        data = []  # 10MB in total
        for i in range(10):
            data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time.time() - 1)))  # 1MB row

        node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        node1.query("DROP TABLE {}".format(name_temp))
        time.sleep(2)
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod2"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
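

# Merges and moves are stopped while inserting, so two parts pile up on jbod1;
# after the first TTL deadline passes, re-enabling merges and running OPTIMIZE
# should write the merged part directly to 'external' (in the positive case).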
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_merges_to_disk_do_not_work", "MergeTree()", 0),
    ("replicated_mt_test_merges_to_disk_do_not_work", "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')", 0),
    ("mt_test_merges_to_disk_work", "MergeTree()", 1),
    ("replicated_mt_test_merges_to_disk_work", "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')", 1),
])
def test_merges_to_disk_work(started_cluster, name, engine, positive):
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("SYSTEM STOP MOVES {}".format(name))

        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 16MB in total
            for i in range(8):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        node1.query("SYSTEM START MERGES {}".format(name))
        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))
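

# Same merge-under-TTL scenario, but the destination disk (jbod2) is filled up
# first: the merge still succeeds, falling back to the source disk "against the
# rule" instead of failing.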
@pytest.mark.parametrize("name,engine", [
    ("mt_test_merges_with_full_disk_work", "MergeTree()"),
    ("replicated_mt_test_merges_with_full_disk_work", "ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')"),
])
def test_merges_with_full_disk_work(started_cluster, name, engine):
    try:
        name_temp = name + "_temp"
        node1.query("""
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
        """.format(name=name_temp))

        data = []  # 35MB in total
        for i in range(35):
            data.append(get_random_string(1024 * 1024))  # 1MB row

        node1.query("INSERT INTO {} VALUES {}".format(name_temp, ",".join(["('" + x + "')" for x in data])))

        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 12MB in total
            for i in range(6):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "2" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()

        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}  # Merged to the same disk, against the TTL rule, because jbod2 is full.
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {}".format(name))
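

# Finally, merge first and move second: OPTIMIZE collapses the two parts into
# one while the TTL is still in the future, and the merged part is then moved
# to 'external' once the deadline passes (in the positive case).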
@pytest.mark.parametrize("name,engine,positive", [
    ("mt_test_moves_after_merges_do_not_work", "MergeTree()", 0),
    ("replicated_mt_test_moves_after_merges_do_not_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')", 0),
    ("mt_test_moves_after_merges_work", "MergeTree()", 1),
    ("replicated_mt_test_moves_after_merges_work", "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')", 1),
])
def test_moves_after_merges_work(started_cluster, name, engine, positive):
    try:
        node1.query("""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))

        wait_expire_1 = 10
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2
        wait_expire_1_thread = threading.Thread(target=time.sleep, args=(wait_expire_1,))
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 14MB in total
            for i in range(7):
                data.append(("'{}'".format(get_random_string(1024 * 1024)), "toDateTime({})".format(time_1 if i > 0 or positive else time_2)))  # 1MB row

            node1.query("INSERT INTO {} (s1, d1) VALUES {}".format(name, ",".join(["(" + ",".join(x) + ")" for x in data])))

        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert "1" == node1.query("SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(name)).strip()

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"

    finally:
        node1.query("DROP TABLE IF EXISTS {}".format(name))