mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-09-20 08:40:50 +00:00
HashedDictionary complex key update field initial load fix
This commit is contained in:
parent
c7bd69cc92
commit
3deb8cd410
@ -354,10 +354,10 @@ void HashedDictionary<dictionary_key_type, sparse>::updateData()
|
||||
if (!previously_loaded_block)
|
||||
previously_loaded_block = std::make_shared<DB::Block>(block.cloneEmpty());
|
||||
|
||||
for (const auto attribute_idx : ext::range(0, attributes.size() + 1))
|
||||
for (size_t attribute_index = 0; attribute_index < block.columns(); ++attribute_index)
|
||||
{
|
||||
const IColumn & update_column = *block.getByPosition(attribute_idx).column.get();
|
||||
MutableColumnPtr saved_column = previously_loaded_block->getByPosition(attribute_idx).column->assumeMutable();
|
||||
const IColumn & update_column = *block.getByPosition(attribute_index).column.get();
|
||||
MutableColumnPtr saved_column = previously_loaded_block->getByPosition(attribute_index).column->assumeMutable();
|
||||
saved_column->insertRangeFrom(update_column, 0, update_column.size());
|
||||
}
|
||||
}
|
||||
|
@ -0,0 +1,30 @@
|
||||
<?xml version="1.0"?>
<!-- Minimal single-node server config for the update_field dictionary test. -->
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
    </logger>

    <tcp_port>9000</tcp_port>
    <listen_host>127.0.0.1</listen_host>

    <openSSL>
        <client>
            <cacheSessions>true</cacheSessions>
            <verificationMode>none</verificationMode>
            <invalidCertificateHandler>
                <name>AcceptCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <max_concurrent_queries>500</max_concurrent_queries>
    <mark_cache_size>5368709120</mark_cache_size>
    <path>./clickhouse/</path>
    <users_config>users.xml</users_config>

    <!-- Dictionaries are picked up from the per-test config.d directory. -->
    <dictionaries_config>/etc/clickhouse-server/config.d/*.xml</dictionaries_config>
</yandex>
|
@ -0,0 +1,23 @@
|
||||
<?xml version="1.0"?>
<!-- Default open user/profile/quota setup for the integration test node. -->
<yandex>
    <profiles>
        <default>
        </default>
    </profiles>

    <users>
        <default>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
        </default>
    </users>

    <quotas>
        <default>
        </default>
    </quotas>
</yandex>
|
79
tests/integration/test_dictionaries_update_field/test.py
Normal file
@ -0,0 +1,79 @@
|
||||
## sudo -H pip install PyMySQL
import time

import pytest
from helpers.cluster import ClickHouseCluster
# NOTE(review): ClickHouseKiller and PartitionManager are imported but not
# used anywhere in this test module — kept for compatibility, confirm intent.
from helpers.cluster import ClickHouseKiller
from helpers.network import PartitionManager

# One-node cluster shared by every test in this module.
cluster = ClickHouseCluster(__file__)

node = cluster.add_instance('main_node', main_configs=[])
||||
@pytest.fixture(scope="module")
def started_cluster():
    """Start the cluster and create the dictionary source table.

    Module-scoped: the cluster and the source table are shared by all
    parametrized cases; the cluster is shut down once the module finishes.
    """
    try:
        cluster.start()

        # Source table read by the dictionaries under test.
        # `last_insert_time` is the column referenced by `update_field`.
        node.query(
            """
            CREATE TABLE table_for_update_field_dictionary
            (
                key UInt64,
                value String,
                last_insert_time DateTime
            )
            ENGINE = TinyLog;
            """
        )

        yield cluster

    finally:
        cluster.shutdown()
|
||||
|
||||
@pytest.mark.parametrize("dictionary_name,dictionary_type", [
    ("flat_update_field_dictionary", "FLAT"),
    ("simple_key_hashed_update_field_dictionary", "HASHED"),
    # FIX: the complex-key case must use the COMPLEX_KEY_HASHED layout.
    # With "HASHED" it merely repeated the simple-key case and never
    # exercised the complex-key update_field path this commit fixes.
    ("complex_key_hashed_update_field_dictionary", "COMPLEX_KEY_HASHED")
])
def test_update_field(started_cluster, dictionary_name, dictionary_type):
    """Check that update_field incrementally picks up new and updated rows.

    Creates a dictionary over `table_for_update_field_dictionary` with
    `update_field 'last_insert_time'`, then verifies that rows inserted
    after the initial load appear (and updated rows are replaced) once the
    dictionary refreshes.
    """
    create_dictionary_query = """
        CREATE DICTIONARY {dictionary_name}
        (
            key UInt64,
            value String,
            last_insert_time DateTime
        )
        PRIMARY KEY key
        SOURCE(CLICKHOUSE(table 'table_for_update_field_dictionary' update_field 'last_insert_time'))
        LAYOUT({dictionary_type}())
        LIFETIME(1);
    """.format(dictionary_name=dictionary_name, dictionary_type=dictionary_type)

    node.query(create_dictionary_query)

    node.query("INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());")
    # First SELECT triggers the initial (full) load of the dictionary.
    node.query("SELECT * FROM {dictionary_name}".format(dictionary_name=dictionary_name))

    node.query("INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());")
    # Inserted after the initial load and before a refresh: not visible yet.
    query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name))
    assert query_result == '1\tFirst\n'

    # LIFETIME(1) means the dictionary refreshes roughly every second;
    # 5 s gives the background update ample time to run.
    time.sleep(5)

    query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name))

    assert query_result == '1\tFirst\n2\tSecond\n'

    # An updated row (same key, newer last_insert_time) must replace the old
    # value; a brand-new key must be added.
    node.query("INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());")
    node.query("INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());")

    time.sleep(5)

    query_result = node.query("SELECT key, value FROM {dictionary_name} ORDER BY key ASC".format(dictionary_name=dictionary_name))

    assert query_result == '1\tFirst\n2\tSecondUpdated\n3\tThird\n'

    # Leave a clean slate for the next parametrized case (module-scoped table).
    node.query("TRUNCATE TABLE table_for_update_field_dictionary")
    node.query("DROP DICTIONARY {dictionary_name}".format(dictionary_name=dictionary_name))
|
Loading…
Reference in New Issue
Block a user