Merge pull request #11913 from ClickHouse/compact-parts-by-default
Enable compact parts by default for small parts
Commit: eb9ee723c3
@@ -63,8 +63,7 @@ std::shared_ptr<TSystemLog> createSystemLog(
        engine = "ENGINE = MergeTree";
        if (!partition_by.empty())
            engine += " PARTITION BY (" + partition_by + ")";
        engine += " ORDER BY (event_date, event_time)"
            "SETTINGS min_bytes_for_wide_part = '10M'"; /// Use polymorphic parts for log tables by default
        engine += " ORDER BY (event_date, event_time)";
    }

    size_t flush_interval_milliseconds = config.getUInt64(config_prefix + ".flush_interval_milliseconds",
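For context, a sketch (not part of the diff; the table name and columns are hypothetical) of the DDL shape the engine string above produces, which makes small system-log parts compact by default:

```sql
-- Hypothetical log table mirroring the generated engine clause;
-- '10M' is the size below which parts are written in compact format.
CREATE TABLE test_log (event_date Date, event_time DateTime, message String)
ENGINE = MergeTree
PARTITION BY (event_date)
ORDER BY (event_date, event_time)
SETTINGS min_bytes_for_wide_part = '10M';
```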
@@ -20,7 +20,7 @@ struct Settings;
    M(UInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \
    \
    /** Data storing format settings. */ \
    M(UInt64, min_bytes_for_wide_part, 0, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \
    M(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \
    M(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \
    M(UInt64, min_bytes_for_compact_part, 0, "Experimental. Minimal uncompressed size in bytes to create part in compact format instead of saving it in RAM", 0) \
    M(UInt64, min_rows_for_compact_part, 0, "Experimental. Minimal number of rows to create part in compact format instead of saving it in RAM", 0) \
@@ -105,6 +105,9 @@ struct Settings;
    M(String, storage_policy, "default", "Name of storage disk policy", 0) \
    M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \
    \
    /** Settings for testing purposes */ \
    M(Bool, randomize_part_type, false, "For testing purposes only. Randomizes part type between wide and compact", 0) \
    \
    /** Obsolete settings. Kept for backward compatibility only. */ \
    M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \
    M(UInt64, check_delay_period, 60, "Obsolete setting, does nothing.", 0) \
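Since this hunk changes the server-wide default, a quick way to observe and override it; a minimal sketch (not part of the diff), using the standard system tables:

```sql
-- The new default is 10485760 bytes (10 MiB):
SELECT name, value FROM system.merge_tree_settings
WHERE name IN ('min_bytes_for_wide_part', 'min_rows_for_wide_part');

-- A table can opt back into wide parts for every insert:
CREATE TABLE t_wide (id UInt64, s String)
ENGINE = MergeTree ORDER BY id
SETTINGS min_bytes_for_wide_part = 0;

-- part_type shows which format each part was written in:
SELECT name, part_type FROM system.parts WHERE table = 't_wide' AND active;
```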
@@ -8,6 +8,7 @@
#include <Common/Macros.h>
#include <Common/OptimizedRegularExpression.h>
#include <Common/typeid_cast.h>
#include <Common/thread_local_rng.h>

#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTExpressionList.h>
@@ -233,6 +234,25 @@ If you use the Replicated version of engines, see https://clickhouse.tech/docs/e
}


static void randomizePartTypeSettings(const std::unique_ptr<MergeTreeSettings> & storage_settings)
{
    static constexpr auto MAX_THRESHOLD_FOR_ROWS = 100000;
    static constexpr auto MAX_THRESHOLD_FOR_BYTES = 1024 * 1024 * 10;

    /// Create all parts in wide format with probability 1/3.
    if (thread_local_rng() % 3 == 0)
    {
        storage_settings->min_rows_for_wide_part = 0;
        storage_settings->min_bytes_for_wide_part = 0;
    }
    else
    {
        storage_settings->min_rows_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_ROWS}(thread_local_rng);
        storage_settings->min_bytes_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_BYTES}(thread_local_rng);
    }
}


static StoragePtr create(const StorageFactory::Arguments & args)
{
    /** [Replicated][|Summing|Collapsing|Aggregating|Replacing|Graphite]MergeTree (2 * 7 combinations) engines
@@ -653,6 +673,20 @@ static StoragePtr create(const StorageFactory::Arguments & args)
        ++arg_num;
    }

    /// Allow to randomize part type for tests to cover more cases.
    /// But if settings were set explicitly restrict it.
    if (storage_settings->randomize_part_type
        && !storage_settings->min_rows_for_wide_part.changed
        && !storage_settings->min_bytes_for_wide_part.changed)
    {
        randomizePartTypeSettings(storage_settings);
        LOG_INFO(&Poco::Logger::get(args.table_id.getNameForLogs() + " (registerStorageMergeTree)"),
            "Applied setting 'randomize_part_type'. "
            "Setting 'min_rows_for_wide_part' changed to {}. "
            "Setting 'min_bytes_for_wide_part' changed to {}.",
            storage_settings->min_rows_for_wide_part, storage_settings->min_bytes_for_wide_part);
    }

    if (arg_num != arg_cnt)
        throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS);

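As an illustration of the testing knob above (a sketch, not from the diff; the table name is hypothetical), randomize_part_type picks random thresholds at CREATE time unless they were set explicitly:

```sql
-- Thresholds get randomized because neither min_rows_for_wide_part
-- nor min_bytes_for_wide_part is set explicitly here:
CREATE TABLE t_random (id UInt64) ENGINE = MergeTree ORDER BY id
SETTINGS randomize_part_type = 1;

INSERT INTO t_random SELECT number FROM numbers(1000);

-- Depending on the randomly chosen thresholds, the new part is Wide or Compact:
SELECT name, part_type, rows FROM system.parts WHERE table = 't_random' AND active;
```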
@@ -506,15 +506,6 @@ def collect_build_flags(client):
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = clickhouse_proc.communicate("SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'")

    if clickhouse_proc.returncode == 0:
        if '10485760' in stdout:
            result.append(BuildFlags.POLYMORPHIC_PARTS)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    return result

@@ -0,0 +1,6 @@
<yandex>
    <merge_tree>
        <min_rows_for_wide_part>0</min_rows_for_wide_part>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</yandex>
@@ -6,45 +6,23 @@ from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)

node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18',
                             with_installed_binary=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', with_installed_binary=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)

node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15',
                             with_installed_binary=True)
node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True)
node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', with_installed_binary=True)
node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)

node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True,
                             with_installed_binary=True)
node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                             with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True,
                             with_installed_binary=True)
node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True)
node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)

node9 = cluster.add_instance('node9', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml',
                             'configs/merge_tree_settings.xml'], with_zookeeper=True,
                             image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True,
                             with_installed_binary=True)
node10 = cluster.add_instance('node10', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml',
                              'configs/merge_tree_settings.xml'], with_zookeeper=True,
                              image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True,
                              with_installed_binary=True)
node9 = cluster.add_instance('node9', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
node10 = cluster.add_instance('node10', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml', 'configs/merge_tree_settings.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.6.3.18', stay_alive=True, with_installed_binary=True)

node11 = cluster.add_instance('node11', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                              with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True,
                              with_installed_binary=True)
node12 = cluster.add_instance('node12', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'],
                              with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True,
                              with_installed_binary=True)
node11 = cluster.add_instance('node11', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)
node12 = cluster.add_instance('node12', main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True, image='yandex/clickhouse-server', tag='19.1.15', stay_alive=True, with_installed_binary=True)


def prepare_single_pair_with_setting(first_node, second_node, group):
@@ -296,10 +274,14 @@ def test_mixed_granularity_single_node(start_dynamic_cluster, node):
        "INSERT INTO table_with_default_granularity VALUES (toDate('2018-09-01'), 1, 333), (toDate('2018-09-02'), 2, 444)")

    def callback(n):
        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml",
                         "<yandex><merge_tree><enable_mixed_granularity_parts>1</enable_mixed_granularity_parts></merge_tree></yandex>")
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml",
                         "<yandex><merge_tree><enable_mixed_granularity_parts>1</enable_mixed_granularity_parts></merge_tree></yandex>")
        new_config = """
<yandex><merge_tree>
<enable_mixed_granularity_parts>1</enable_mixed_granularity_parts>
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree></yandex>"""

        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config)
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config)

    node.restart_with_latest_version(callback_onstop=callback)
    node.query("SYSTEM RELOAD CONFIG")
@@ -342,10 +324,14 @@ def test_version_update_two_nodes(start_dynamic_cluster):
    assert node12.query("SELECT COUNT() FROM table_with_default_granularity") == '2\n'

    def callback(n):
        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml",
                         "<yandex><merge_tree><enable_mixed_granularity_parts>0</enable_mixed_granularity_parts></merge_tree></yandex>")
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml",
                         "<yandex><merge_tree><enable_mixed_granularity_parts>0</enable_mixed_granularity_parts></merge_tree></yandex>")
        new_config = """
<yandex><merge_tree>
<enable_mixed_granularity_parts>0</enable_mixed_granularity_parts>
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree></yandex>"""

        n.replace_config("/etc/clickhouse-server/merge_tree_settings.xml", new_config)
        n.replace_config("/etc/clickhouse-server/config.d/merge_tree_settings.xml", new_config)

    node12.restart_with_latest_version(callback_onstop=callback)

@@ -0,0 +1,5 @@
<yandex>
    <merge_tree>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</yandex>
@@ -3,9 +3,8 @@ import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54',
                             stay_alive=True, with_installed_binary=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)
node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='19.17.8.54', stay_alive=True, with_installed_binary=True)
node2 = cluster.add_instance('node2', main_configs=['configs/wide_parts_only.xml'], with_zookeeper=True)


@pytest.fixture(scope="module")
@@ -25,7 +24,7 @@ def start_cluster():
    cluster.shutdown()


def test_backward_compatability(start_cluster):
def test_backward_compatability1(start_cluster):
    node2.query("INSERT INTO t VALUES (today(), 1)")
    node1.query("SYSTEM SYNC REPLICA t", timeout=10)


@@ -21,7 +21,8 @@ def started_cluster():

    node1.query('''
        CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32)
        ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id;
        ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id
        SETTINGS min_bytes_for_wide_part=0;
    ''')

    yield cluster

@@ -0,0 +1,6 @@
<yandex>
    <merge_tree>
        <min_rows_for_wide_part>0</min_rows_for_wide_part>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</yandex>
@@ -6,12 +6,9 @@ from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml'],
                             image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True,
                             with_installed_binary=True)

node1 = cluster.add_instance('node1', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], with_zookeeper=True)
node3 = cluster.add_instance('node3', main_configs=['configs/default_compression.xml', 'configs/wide_parts_only.xml'], image='yandex/clickhouse-server', tag='20.3.16', stay_alive=True, with_installed_binary=True)

@pytest.fixture(scope="module")
def start_cluster():

@@ -19,7 +19,7 @@ def test_file_path_escaping(started_cluster):
    node.query('CREATE DATABASE IF NOT EXISTS test ENGINE = Ordinary')
    node.query('''
        CREATE TABLE test.`T.a_b,l-e!` (`~Id` UInt32)
        ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id`;
        ENGINE = MergeTree() PARTITION BY `~Id` ORDER BY `~Id` SETTINGS min_bytes_for_wide_part = 0;
    ''')
    node.query('''INSERT INTO test.`T.a_b,l-e!` VALUES (1);''')
    node.query('''ALTER TABLE test.`T.a_b,l-e!` FREEZE;''')

@@ -25,4 +25,8 @@
        </s3>
    </policies>
</storage_configuration>

<merge_tree>
    <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree>
</yandex>

@@ -18,4 +18,8 @@
        </s3>
    </policies>
</storage_configuration>

<merge_tree>
    <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree>
</yandex>

@@ -40,7 +40,8 @@ def get_query_stat(instance, hint):
    return result


def test_write_is_cached(cluster):
@pytest.mark.parametrize("min_rows_for_wide_part,read_requests", [(0, 2), (8192, 1)])
def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests):
    node = cluster.instances["node"]

    node.query(
@@ -50,8 +51,8 @@ def test_write_is_cached(cluster):
            data String
        ) ENGINE=MergeTree()
        ORDER BY id
        SETTINGS storage_policy='s3'
        """
        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
        """.format(min_rows_for_wide_part)
    )

    node.query("SYSTEM FLUSH LOGS")
@@ -63,12 +64,12 @@ def test_write_is_cached(cluster):
    assert node.query(select_query) == "(0,'data'),(1,'data')"

    stat = get_query_stat(node, select_query)
    assert stat["S3ReadRequestsCount"] == 2  # Only .bin files should be accessed from S3.
    assert stat["S3ReadRequestsCount"] == read_requests  # Only .bin files should be accessed from S3.

    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")


def test_read_after_cache_is_wiped(cluster):
@pytest.mark.parametrize("min_rows_for_wide_part,all_files,bin_files", [(0, 4, 2), (8192, 2, 1)])
def test_read_after_cache_is_wiped(cluster, min_rows_for_wide_part, all_files, bin_files):
    node = cluster.instances["node"]

    node.query(
@@ -78,8 +79,8 @@ def test_read_after_cache_is_wiped(cluster):
            data String
        ) ENGINE=MergeTree()
        ORDER BY id
        SETTINGS storage_policy='s3'
        """
        SETTINGS storage_policy='s3', min_rows_for_wide_part={}
        """.format(min_rows_for_wide_part)
    )

    node.query("SYSTEM FLUSH LOGS")
@@ -93,12 +94,12 @@ def test_read_after_cache_is_wiped(cluster):
    select_query = "SELECT * FROM s3_test"
    node.query(select_query)
    stat = get_query_stat(node, select_query)
    assert stat["S3ReadRequestsCount"] == 4  # .mrk and .bin files should be accessed from S3.
    assert stat["S3ReadRequestsCount"] == all_files  # .mrk and .bin files should be accessed from S3.

    # After cache is populated again, only .bin files should be accessed from S3.
    select_query = "SELECT * FROM s3_test order by id FORMAT Values"
    assert node.query(select_query) == "(0,'data'),(1,'data')"
    stat = get_query_stat(node, select_query)
    assert stat["S3ReadRequestsCount"] == 2
    assert stat["S3ReadRequestsCount"] == bin_files

    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
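The parametrized request counts above follow from the on-disk layout: a wide part stores a separate .bin/.mrk pair per column, while a compact part keeps all columns in a single shared data file. A sketch (illustrative, not from the diff) to confirm which format a part got:

```sql
-- For a two-column table (id, data): a wide part needs one S3 GET per
-- column .bin file (2 requests); a compact part needs a single GET.
SELECT name, part_type, rows
FROM system.parts
WHERE table = 's3_test' AND active;
```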
@@ -0,0 +1,6 @@
<yandex>
    <merge_tree>
        <min_rows_for_wide_part>0</min_rows_for_wide_part>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</yandex>
@@ -8,7 +8,7 @@ from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1')
node1 = cluster.add_instance('node1', main_configs=['configs/wide_parts_only.xml'])


@pytest.fixture(scope="module")

@@ -176,7 +176,7 @@ def test_attach_check_all_parts(attach_check_all_parts_table):
    exec_bash('cp -pr {} {}'.format(path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0'))

    error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
    assert 0 <= error.find('No columns in part 0_5_5_0')
    assert 0 <= error.find('No columns in part 0_5_5_0') or 0 <= error.find('No columns.txt in part 0_5_5_0')

    parts = q("SELECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name")
    assert TSV(parts) == TSV('1_2_2_0\n1_4_4_0')

@@ -1,5 +1,6 @@
<yandex>
    <merge_tree>
        <min_rows_for_wide_part>512</min_rows_for_wide_part>
        <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
    </merge_tree>
</yandex>

@@ -44,10 +44,10 @@ def create_tables(name, nodes, node_settings, shard):
            ORDER BY id
            SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes},
            min_rows_for_wide_part = {min_rows_for_wide_part}, min_rows_for_compact_part = {min_rows_for_compact_part},
            min_bytes_for_wide_part = 0, min_bytes_for_compact_part = 0,
            in_memory_parts_enable_wal = 1
            '''.format(name=name, shard=shard, repl=i, **settings))


def create_tables_old_format(name, nodes, shard):
    for i, node in enumerate(nodes):
        node.query(
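The settings combination above exercises all three part formats. For orientation, a sketch with illustrative thresholds (table name and values are hypothetical; with the byte thresholds at 0, the row thresholds decide, matching how these tests parametrize):

```sql
--   rows < 256  -> InMemory part (held in RAM, persisted via WAL)
--   rows < 512  -> Compact part (all columns in one data file)
--   otherwise   -> Wide part (a .bin/.mrk pair per column)
CREATE TABLE t_tiers (id UInt64) ENGINE = MergeTree ORDER BY id
SETTINGS min_rows_for_compact_part = 256, min_rows_for_wide_part = 512,
         min_bytes_for_compact_part = 0, min_bytes_for_wide_part = 0,
         in_memory_parts_enable_wal = 1;

INSERT INTO t_tiers SELECT number FROM numbers(300);  -- 300 rows -> compact
SELECT name, part_type FROM system.parts WHERE table = 't_tiers' AND active;
```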
@@ -18,4 +18,8 @@
        </s3>
    </policies>
</storage_configuration>

<merge_tree>
    <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
</merge_tree>
</yandex>

@@ -32,7 +32,8 @@ def cluster():

FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2  # Data and mark files
FILES_OVERHEAD_PER_PART = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1


def random_string(length):
@@ -46,7 +47,7 @@ def generate_values(date_str, count, sign=1):
    return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data])


def create_table(cluster):
def create_table(cluster, additional_settings=None):
    create_table_statement = """
        CREATE TABLE s3_test (
            dt Date,
@@ -58,6 +59,9 @@ def create_table(cluster):
        ORDER BY (dt, id)
        SETTINGS storage_policy='s3'
        """
    if additional_settings:
        create_table_statement += ","
        create_table_statement += additional_settings

    for node in cluster.instances.values():
        node.query(create_table_statement)
@@ -74,9 +78,15 @@ def drop_table(cluster):
    for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
        minio.remove_object(cluster.minio_bucket, obj.object_name)


def test_insert_select_replicated(cluster):
    create_table(cluster)
@pytest.mark.parametrize(
    "min_rows_for_wide_part,files_per_part",
    [
        (0, FILES_OVERHEAD_PER_PART_WIDE),
        (8192, FILES_OVERHEAD_PER_PART_COMPACT)
    ]
)
def test_insert_select_replicated(cluster, min_rows_for_wide_part, files_per_part):
    create_table(cluster, additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part))

    all_values = ""
    for node_idx in range(1, 4):
@@ -93,5 +103,4 @@ def test_insert_select_replicated(cluster):
                        settings={"select_sequential_consistency": 1}) == all_values

    minio = cluster.minio_client
    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (
        FILES_OVERHEAD + FILES_OVERHEAD_PER_PART * 3)
    assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == 3 * (FILES_OVERHEAD + files_per_part * 3)
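A worked version of the per-part file counts asserted above (assuming the test table's 3 columns; the fixed terms are assumed to cover per-part metadata such as the primary index, checksums, count and partition files):

```sql
SELECT
    2 * 3 + 2 + 6 + 1 AS files_per_wide_part,   -- = 15: one .bin/.mrk pair per column plus metadata
    10 + 1 AS files_per_compact_part;           -- = 11: shared data/mark files plus metadata
```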
@@ -29,7 +29,7 @@ def drop_table(nodes, table_name):
        node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name))
    time.sleep(1)


# Column TTL works only with wide parts, because it's very expensive to apply it for compact parts
def test_ttl_columns(started_cluster):
    drop_table([node1, node2], "test_ttl")
    for node in [node1, node2]:
@@ -37,7 +37,7 @@ def test_ttl_columns(started_cluster):
            '''
                CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}')
                ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0;
                ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0;
            '''.format(replica=node.name))

    node1.query("INSERT INTO test_ttl VALUES (toDateTime('2000-10-10 00:00:00'), 1, 1, 3)")
@@ -58,7 +58,8 @@ def test_merge_with_ttl_timeout(started_cluster):
            '''
                CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}')
                ORDER BY id PARTITION BY toDayOfMonth(date);
                ORDER BY id PARTITION BY toDayOfMonth(date)
                SETTINGS min_bytes_for_wide_part=0;
            '''.format(replica=node.name, table=table))

    node1.query("SYSTEM STOP TTL MERGES {table}".format(table=table))
@@ -204,7 +205,7 @@ def test_ttl_double_delete_rule_returns_error(started_cluster):
            CREATE TABLE test_ttl(date DateTime, id UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}')
            ORDER BY id PARTITION BY toDayOfMonth(date)
            TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0;
            TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0
        '''.format(replica=node1.name))
        assert False
    except client.QueryRuntimeException:
@@ -254,6 +255,7 @@ limitations under the License."""
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 + INTERVAL 1 DAY DELETE
            SETTINGS min_bytes_for_wide_part=0
            """.format(name=name, engine=engine))

    node1.query("""ALTER TABLE {name} MODIFY COLUMN s1 String TTL d1 + INTERVAL 1 SECOND""".format(name=name))
@@ -1,7 +1,7 @@
DROP TABLE IF EXISTS mt_00160;
DROP TABLE IF EXISTS merge_00160;

CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree(d, x, 1);
CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY x SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0;
CREATE TABLE merge_00160 (d Date, x UInt64) ENGINE = Merge(currentDatabase(), '^mt_00160$');

SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
@@ -14,7 +14,7 @@ SELECT *, b FROM merge_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize(
DROP TABLE merge_00160;
DROP TABLE mt_00160;

CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree(d, (x, z), 1);
CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY (x, z) SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0;

INSERT INTO mt_00160 (x, y, z) SELECT number AS x, number + 10 AS y, number / 2 AS z FROM system.numbers LIMIT 100000;


@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes"
$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE preferred_block_size_bytes (p Date, s String) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=1, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "INSERT INTO preferred_block_size_bytes (s) SELECT '16_bytes_-_-_-_' AS s FROM system.numbers LIMIT 10, 90"
$CLICKHOUSE_CLIENT -q "OPTIMIZE TABLE preferred_block_size_bytes"
$CLICKHOUSE_CLIENT --preferred_block_size_bytes=26 -q "SELECT DISTINCT blockSize(), ignore(p, s) FROM preferred_block_size_bytes"
@@ -17,7 +17,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes"
# PREWHERE using empty column

$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS pbs"
$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE pbs (p Date, i UInt64, sa Array(String)) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=100, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "INSERT INTO pbs (p, i, sa) SELECT toDate(i % 30) AS p, number AS i, ['a'] AS sa FROM system.numbers LIMIT 1000"
$CLICKHOUSE_CLIENT -q "ALTER TABLE pbs ADD COLUMN s UInt8 DEFAULT 0"
$CLICKHOUSE_CLIENT --preferred_block_size_bytes=100000 -q "SELECT count() FROM pbs PREWHERE s = 0"
@@ -28,7 +28,7 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE pbs"
# Nullable PREWHERE

$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere"
$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE nullable_prewhere (p Date, f Nullable(UInt64), d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY p SETTINGS index_granularity=8, index_granularity_bytes=0, min_bytes_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "INSERT INTO nullable_prewhere SELECT toDate(0) AS p, if(number % 2 = 0, CAST(number AS Nullable(UInt64)), CAST(NULL AS Nullable(UInt64))) AS f, number as d FROM system.numbers LIMIT 1001"
$CLICKHOUSE_CLIENT -q "SELECT sum(d), sum(f), max(d) FROM nullable_prewhere PREWHERE NOT isNull(f)"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS nullable_prewhere"

@@ -3,9 +3,8 @@ SELECT '===Ordinary case===';
SET replication_alter_partitions_sync = 2;

DROP TABLE IF EXISTS clear_column;
CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree(d, d, 8192);
CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO clear_column VALUES ('2016-12-12', 1, 'a'), ('2016-11-12', 2, 'b');

@@ -24,8 +24,8 @@ SELECT '===Replicated case===';
DROP TABLE IF EXISTS clear_column1;
DROP TABLE IF EXISTS clear_column2;
SELECT sleep(1) FORMAT Null;
CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1', d, d, 8192);
CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2', d, d, 8192);
CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0;
CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO clear_column1 (d) VALUES ('2000-01-01'), ('2000-02-01');
SYSTEM SYNC REPLICA clear_column2;

@@ -1,5 +1,5 @@
drop table if exists tab_00484;
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0;
insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 8192;

set preferred_block_size_bytes = 2000000;
@@ -15,19 +15,19 @@ set preferred_max_column_in_block_size_bytes = 4194304;
select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484;

drop table if exists tab_00484;
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 32);
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0;
insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 47;
set preferred_max_column_in_block_size_bytes = 1152;
select blockSize(), * from tab_00484 where x = 1 or x > 36 format Null;

drop table if exists tab_00484;
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree(date, (date, x), 8192);
create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0;
insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 10;
set preferred_max_column_in_block_size_bytes = 128;
select s from tab_00484 where s == '' format Null;

drop table if exists tab_00484;
create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree(date, (date, x), 8192);
create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0;
insert into tab_00484 select today(), number, 'abc' from system.numbers limit 81920;
set preferred_block_size_bytes = 0;
select count(*) from tab_00484 prewhere s != 'abc' format Null;
@@ -20,13 +20,12 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS unsigned_integer_test_table;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS enum_test_table;"
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS date_test_table;"

${CLICKHOUSE_CLIENT} --query="CREATE TABLE string_test_table (val String) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedString(1)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"

${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE string_test_table (val String) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE fixed_string_test_table (val FixedString(1)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE signed_integer_test_table (val Int32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE unsigned_integer_test_table (val UInt32) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE enum_test_table (val Enum16('hello' = 1, 'world' = 2, 'yandex' = 256, 'clickhouse' = 257)) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE date_test_table (val Date) ENGINE = MergeTree ORDER BY val SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0;"

${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES string_test_table;"
${CLICKHOUSE_CLIENT} --query="SYSTEM STOP MERGES fixed_string_test_table;"

@@ -9,7 +9,8 @@ CREATE TABLE check_system_tables
) ENGINE = MergeTree()
ORDER BY name1
PARTITION BY name2
SAMPLE BY name1;
SAMPLE BY name1
SETTINGS min_bytes_for_wide_part = 0;

SELECT name, partition_key, sorting_key, primary_key, sampling_key, storage_policy, total_rows
FROM system.tables

@@ -63,7 +63,7 @@ CREATE TABLE large_alter_table_00804 (
    somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
    id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
    data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4)
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2;
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, min_bytes_for_wide_part = 0;

INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;


@@ -7,12 +7,12 @@ DROP TABLE IF EXISTS default_codec_synthetic;
CREATE TABLE delta_codec_synthetic
(
    id UInt64 Codec(Delta, ZSTD(3))
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

CREATE TABLE default_codec_synthetic
(
    id UInt64 Codec(ZSTD(3))
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000;
@@ -45,12 +45,12 @@ DROP TABLE IF EXISTS default_codec_float;
CREATE TABLE delta_codec_float
(
    id Float64 Codec(Delta, LZ4HC)
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

CREATE TABLE default_codec_float
(
    id Float64 Codec(LZ4HC)
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0;
INSERT INTO default_codec_float SELECT * from delta_codec_float;
@@ -83,12 +83,12 @@ DROP TABLE IF EXISTS default_codec_string;
CREATE TABLE delta_codec_string
(
    id Float64 Codec(Delta, LZ4)
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

CREATE TABLE default_codec_string
(
    id Float64 Codec(LZ4)
) ENGINE MergeTree() ORDER BY tuple();
) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;

INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000);
INSERT INTO default_codec_string SELECT * from delta_codec_string;

@@ -9,6 +9,7 @@ CREATE TABLE zero_rows_per_granule (
    Sign Int8
) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k
SETTINGS index_granularity_bytes=20, min_index_granularity_bytes=10, write_final_mark = 0,
min_bytes_for_wide_part = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
@@ -40,6 +41,7 @@ CREATE TABLE four_rows_per_granule (
    Sign Int8
) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k
SETTINGS index_granularity_bytes=110, min_index_granularity_bytes=100, write_final_mark = 0,
min_bytes_for_wide_part = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
@@ -7,7 +7,7 @@ CREATE TABLE zero_rows_per_granule (
    k UInt64,
    v1 UInt64,
    v2 Int64
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0;
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;

INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -34,7 +34,7 @@ CREATE TABLE two_rows_per_granule (
    k UInt64,
    v1 UInt64,
    v2 Int64
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0;
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;

INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -61,7 +61,7 @@ CREATE TABLE four_rows_per_granule (
    k UInt64,
    v1 UInt64,
    v2 Int64
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0;
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0;

INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -95,7 +95,7 @@ CREATE TABLE huge_granularity_small_blocks (
    k UInt64,
    v1 UInt64,
    v2 Int64
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0;
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0;

INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -126,7 +126,7 @@ CREATE TABLE adaptive_granularity_alter (
    k UInt64,
    v1 UInt64,
    v2 Int64
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0;
) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0;

INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -185,7 +185,8 @@ CREATE TABLE zero_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0,
min_bytes_for_wide_part = 0;


INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -219,7 +220,8 @@ CREATE TABLE two_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0,
min_bytes_for_wide_part = 0;

INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -252,7 +254,8 @@ CREATE TABLE four_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0,
min_bytes_for_wide_part = 0;

INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -290,7 +293,8 @@ CREATE TABLE huge_granularity_small_blocks (
SETTINGS index_granularity_bytes=1000000, write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0,
min_bytes_for_wide_part = 0;

INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -327,7 +331,8 @@ CREATE TABLE adaptive_granularity_alter (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0,
min_bytes_for_wide_part = 0;


INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);
@@ -62,7 +62,7 @@ CREATE TABLE large_alter_table_00926 (
    somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
    id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
    data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4)
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity_bytes=40, min_index_granularity_bytes=30, write_final_mark = 0;
) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M';

INSERT INTO large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000;


@@ -12,7 +12,7 @@ CREATE TABLE zero_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -45,7 +45,7 @@ CREATE TABLE two_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -78,7 +78,7 @@ CREATE TABLE four_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -124,7 +124,7 @@ CREATE TABLE huge_granularity_small_blocks (
SETTINGS index_granularity_bytes=1000000, write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);

@@ -162,7 +162,7 @@ CREATE TABLE adaptive_granularity_alter (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000);


@@ -14,7 +14,7 @@ CREATE TABLE zero_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1);

@@ -48,7 +48,7 @@ CREATE TABLE four_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;

INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1);

@@ -95,7 +95,7 @@ CREATE TABLE six_rows_per_granule (
write_final_mark = 0,
enable_vertical_merge_algorithm=1,
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0;
vertical_merge_algorithm_min_columns_to_activate=0, min_bytes_for_wide_part = 0;


INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 1, 1000, 2000, -1, 2);
@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
|
||||
$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a);"
|
||||
$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a) SETTINGS min_bytes_for_wide_part = 0;"
|
||||
|
||||
$CLICKHOUSE_CLIENT --query="INSERT INTO small_table (n) SELECT * from system.numbers limit 100000;"
|
||||
$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE small_table FINAL;"
|
||||
|
@ -6,11 +6,11 @@
|
||||
2000-10-10 00:00:00 0
|
||||
2100-10-10 00:00:00 3
|
||||
2100-10-10 2
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() - 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
|
||||
1 0
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL now() + 1000\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
|
||||
1 1
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() - 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
|
||||
1 0
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS index_granularity = 8192
|
||||
CREATE TABLE default.ttl_00933_1\n(\n `b` Int32,\n `a` Int32 TTL today() + 1\n)\nENGINE = MergeTree\nPARTITION BY tuple()\nORDER BY tuple()\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192
|
||||
1 1
|
||||
|
@ -1,6 +1,8 @@
|
||||
drop table if exists ttl_00933_1;
|
||||
|
||||
create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d);
|
||||
-- Column TTL works only with wide parts, because it's very expensive to apply it for compact parts
|
||||
|
||||
create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d) settings min_bytes_for_wide_part = 0;
|
||||
insert into ttl_00933_1 values (now(), 1, 2);
|
||||
insert into ttl_00933_1 values (now(), 3, 4);
|
||||
select sleep(1.1) format Null;
|
||||
@ -13,18 +15,16 @@ create table ttl_00933_1 (d DateTime, a Int, b Int) engine = MergeTree order by
|
||||
insert into ttl_00933_1 values (now(), 1, 2);
|
||||
insert into ttl_00933_1 values (now(), 3, 4);
|
||||
insert into ttl_00933_1 values (now() + 1000, 5, 6);
|
||||
select sleep(1.1) format Null;
|
||||
optimize table ttl_00933_1 final; -- check ttl merge for part with both expired and unexpired values
|
||||
select sleep(1.1) format Null; -- wait if very fast merge happen
|
||||
optimize table ttl_00933_1 final;
|
||||
select a, b from ttl_00933_1;

drop table if exists ttl_00933_1;

create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d);
create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d) settings min_bytes_for_wide_part = 0;
insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1);
insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2);
insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1 order by d;

@ -34,7 +34,6 @@ create table ttl_00933_1 (d DateTime, a Int) engine = MergeTree order by tuple()
insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1);
insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2);
insert into ttl_00933_1 values (toDateTime('2100-10-10 00:00:00'), 3);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1 order by d;

@ -43,43 +42,38 @@ drop table if exists ttl_00933_1;
create table ttl_00933_1 (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day;
insert into ttl_00933_1 values (toDate('2000-10-10'), 1);
insert into ttl_00933_1 values (toDate('2100-10-10'), 2);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1 order by d;

-- const DateTime TTL positive
drop table if exists ttl_00933_1;
create table ttl_00933_1 (b Int, a Int ttl now()-1000) engine = MergeTree order by tuple() partition by tuple();
create table ttl_00933_1 (b Int, a Int ttl now()-1000) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0;
show create table ttl_00933_1;
insert into ttl_00933_1 values (1, 1);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1;

-- const DateTime TTL negative
drop table if exists ttl_00933_1;
create table ttl_00933_1 (b Int, a Int ttl now()+1000) engine = MergeTree order by tuple() partition by tuple();
create table ttl_00933_1 (b Int, a Int ttl now()+1000) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0;
show create table ttl_00933_1;
insert into ttl_00933_1 values (1, 1);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1;

-- const Date TTL positive
drop table if exists ttl_00933_1;
create table ttl_00933_1 (b Int, a Int ttl today()-1) engine = MergeTree order by tuple() partition by tuple();
create table ttl_00933_1 (b Int, a Int ttl today()-1) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0;
show create table ttl_00933_1;
insert into ttl_00933_1 values (1, 1);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1;

-- const Date TTL negative
drop table if exists ttl_00933_1;
create table ttl_00933_1 (b Int, a Int ttl today()+1) engine = MergeTree order by tuple() partition by tuple();
create table ttl_00933_1 (b Int, a Int ttl today()+1) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0;
show create table ttl_00933_1;
insert into ttl_00933_1 values (1, 1);
select sleep(0.7) format Null; -- wait if very fast merge happen
optimize table ttl_00933_1 final;
select * from ttl_00933_1;

@ -1 +1 @@
20000101_20000101_1_1_0 test_00961 1c63ae7a38eb76e2a71c28aaf0b3ae4d 0053df9b467cc5483e752ec62e91cfd4 da96ff1e527a8a1f908ddf2b1d0af239
20000101_1_1_0 test_00961 5f2e2d4bbc14336f44037e3ac667f247 ed226557cd4e18ecf3ae06c6d5e6725c da96ff1e527a8a1f908ddf2b1d0af239

@ -1,14 +1,15 @@
DROP TABLE IF EXISTS test_00961;
CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111);
CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32)
ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) SETTINGS index_granularity = 111, min_bytes_for_wide_part = 0;
INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789);
SELECT
name,
table,
hash_of_all_files,
hash_of_uncompressed_files,
uncompressed_hash_of_compressed_files
FROM system.parts
WHERE table = 'test_00961' and database = currentDatabase();

@ -1,5 +1,5 @@
DROP TABLE IF EXISTS tab;
create table tab (A Int64) Engine=MergeTree order by tuple();
create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0;
insert into tab select cityHash64(number) from numbers(1000);
select sum(sleep(0.1)) from tab settings max_block_size = 1, max_execution_time=1; -- { serverError 159 }
DROP TABLE IF EXISTS tab;

@ -1,7 +1,7 @@
SET check_query_single_value_result = 0;
DROP TABLE IF EXISTS check_query_test;
CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey;
CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0;
-- Number of rows in the last granule should be equal to the granularity.
-- Rows in this table are short, so granularity will be 8192.
@ -17,7 +17,7 @@ DROP TABLE IF EXISTS check_query_test;
DROP TABLE IF EXISTS check_query_test_non_adaptive;
CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0;
CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0;
INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920;
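
For context, index_granularity_bytes = 0 disables adaptive granularity, so every granule holds exactly index_granularity rows; a rough way to check that (a sketch, not part of this diff, using the rows and marks columns of system.parts):

SELECT name, rows, marks FROM system.parts WHERE table = 'check_query_test_non_adaptive' AND active;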

@ -10,9 +10,9 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s2"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv"
$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3"
$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3"
$CLICKHOUSE_CLIENT -q "CREATE TABLE s1 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE s2 (a UInt32, s String) ENGINE = MergeTree ORDER BY a PARTITION BY a % 3 SETTINGS min_bytes_for_wide_part = 0"
$CLICKHOUSE_CLIENT -q "CREATE TABLE m (a UInt32, s String) engine = Merge(currentDatabase(), 's[1,2]')"
$CLICKHOUSE_CLIENT -q "INSERT INTO s1 select (number % 20) * 2 as n, toString(number * number) from numbers(100000)"

@ -45,7 +45,7 @@ else
fi
$CLICKHOUSE_CLIENT -q "SELECT '---MaterializedView---'"
$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0"
$CLICKHOUSE_CLIENT -q "CREATE MATERIALIZED VIEW mv (a UInt32, s String) engine = MergeTree ORDER BY s SETTINGS min_bytes_for_wide_part = 0 POPULATE AS SELECT a, s FROM s1 WHERE a % 7 = 0"
$CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10"
rows_read=$($CLICKHOUSE_CLIENT -q "SELECT a, s FROM mv ORDER BY s LIMIT 10 FORMAT JSON" --max_threads=1 --max_block_size=20 | grep "rows_read" | sed 's/[^0-9]*//g')

@ -59,4 +59,4 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s1"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS s2"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS m"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS buf"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mv"

@ -3,9 +3,10 @@ set mutations_sync = 2;
drop table if exists mt_compact;
create table mt_compact(a UInt64, b UInt64 DEFAULT a * a, s String, n Nested(x UInt32, y String), lc LowCardinality(String))
engine = MergeTree
order by a partition by a % 10
settings index_granularity = 8,
min_bytes_for_wide_part = 0,
min_rows_for_wide_part = 10;
insert into mt_compact (a, s, n.y, lc) select number, toString((number * 2132214234 + 5434543) % 2133443), ['a', 'b', 'c'], number % 2 ? 'bar' : 'baz' from numbers(90);
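
For context: as far as the setting descriptions go, a new part is written compact when it falls below either threshold, i.e. it becomes wide only once it reaches both min_bytes_for_wide_part and min_rows_for_wide_part. With the settings above, the 90 inserted rows spread over the a % 10 partitions give roughly 9-row parts, which should therefore stay compact; a sketch of verifying that (not part of this diff):

select partition, name, part_type, rows from system.parts where table = 'mt_compact' and active order by partition;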

@ -1,5 +1,5 @@
DROP TABLE IF EXISTS test_01343;
CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple();
CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO test_01343 VALUES ('Hello, world');
SET min_bytes_to_use_mmap_io = 1;

@ -1,5 +1,5 @@
DROP TABLE IF EXISTS test_01344;
CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple();
CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0;
INSERT INTO test_01344 VALUES ('Hello, world');
SET min_bytes_to_use_mmap_io = 1;

@ -1,6 +1,6 @@
DROP TABLE IF EXISTS fixed_granularity_table;
CREATE TABLE fixed_granularity_table (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0; -- looks like default table before update
CREATE TABLE fixed_granularity_table (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0, min_bytes_for_wide_part = 0; -- looks like default table before update
ALTER TABLE fixed_granularity_table REPLACE PARTITION 201403 FROM test.hits;
@ -29,7 +29,7 @@ ALTER TABLE test.hits ATTACH PARTITION 201403;
DROP TABLE IF EXISTS hits_copy;
CREATE TABLE hits_copy (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0;
CREATE TABLE hits_copy (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0, min_bytes_for_wide_part = 0;
ALTER TABLE hits_copy REPLACE PARTITION 201403 FROM test.hits;

@ -102,31 +102,5 @@
/// Internal dictionary name is different
"01225_show_create_table_from_dictionary",
"01224_no_superfluous_dict_reload"
],
"polymorphic-parts": [
/// These tests fail with compact parts, because they
/// check some implementation-defined things
/// like checksums, computed granularity, ProfileEvents, etc.
"01045_order_by_pk_special_storages",
"01042_check_query_and_last_granule_size",
"00961_checksums_in_system_parts_columns_table",
"00933_test_fix_extra_seek_on_compressed_cache",
"00926_adaptive_index_granularity_collapsing_merge_tree",
"00926_adaptive_index_granularity_merge_tree",
"00926_adaptive_index_granularity_replacing_merge_tree",
"00926_adaptive_index_granularity_versioned_collapsing_merge_tree",
"00804_test_delta_codec_compression",
"00731_long_merge_tree_select_opened_files",
"00653_verification_monotonic_data_load",
"00484_preferred_max_column_in_block_size_bytes",
"00446_clear_column_in_partition_zookeeper",
"00443_preferred_block_size_bytes",
"00160_merge_and_index_in_in",
"01055_compact_parts",
"01039_mergetree_exec_time",
"00933_ttl_simple", /// Maybe it's worth fixing
"00753_system_columns_and_system_tables",
"01343_min_bytes_to_use_mmap_io",
"01344_min_bytes_to_use_mmap_io_index"
]
}
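
For tests that depend on implementation-defined details like these (checksums, mark counts, ProfileEvents), the pattern this diff applies is to pin the part format explicitly in each test rather than keep them in a skip list; a minimal sketch (table and column are illustrative only):

CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS min_bytes_for_wide_part = 0;  -- parts are always written in wide format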