Deprecate in-memory parts

Alexey Milovidov 2023-05-03 00:31:09 +02:00
parent 03aa4f7f8a
commit f449df85b6
51 changed files with 31 additions and 820 deletions
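In user-visible terms: the in-memory settings are kept only so that existing table definitions still load, and the part-format chooser below never returns the InMemory type anymore, so every new part is written to disk. A minimal SQL sketch of the expected behavior on a server with this commit (table name is illustrative):

-- The old settings still parse but are ignored; the part comes out Compact.
CREATE TABLE t_demo (a UInt32) ENGINE = MergeTree ORDER BY a
SETTINGS min_rows_for_compact_part = 1000, in_memory_parts_enable_wal = 1;

INSERT INTO t_demo VALUES (1);

SELECT part_type FROM system.parts
WHERE database = currentDatabase() AND table = 't_demo' AND active;
-- expected: Compact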

View File

@@ -188,10 +188,8 @@
\
M(InsertedWideParts, "Number of parts inserted in Wide format.") \
M(InsertedCompactParts, "Number of parts inserted in Compact format.") \
M(InsertedInMemoryParts, "Number of parts inserted in InMemory format.") \
M(MergedIntoWideParts, "Number of parts merged into Wide format.") \
M(MergedIntoCompactParts, "Number of parts merged into Compact format.") \
M(MergedIntoInMemoryParts, "Number of parts merged into InMemory format.") \
\
M(MergeTreeDataProjectionWriterRows, "Number of rows INSERTed to MergeTree tables projection.") \
M(MergeTreeDataProjectionWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables projection.") \
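After this change only the Wide and Compact counters can grow; they are visible in system.events. A quick hedged check (values depend entirely on the workload):

SELECT event, value FROM system.events
WHERE event IN ('InsertedWideParts', 'InsertedCompactParts',
                'MergedIntoWideParts', 'MergedIntoCompactParts');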

View File

@@ -117,10 +117,8 @@ namespace ProfileEvents
extern const Event DelayedInsertsMilliseconds;
extern const Event InsertedWideParts;
extern const Event InsertedCompactParts;
extern const Event InsertedInMemoryParts;
extern const Event MergedIntoWideParts;
extern const Event MergedIntoCompactParts;
extern const Event MergedIntoInMemoryParts;
}
namespace CurrentMetrics
@@ -380,8 +378,7 @@ MergeTreeData::MergeTreeData(
String reason;
if (!canUsePolymorphicParts(*settings, &reason) && !reason.empty())
LOG_WARNING(log, "{} Settings 'min_rows_for_wide_part', 'min_bytes_for_wide_part', "
"'min_rows_for_compact_part' and 'min_bytes_for_compact_part' will be ignored.", reason);
LOG_WARNING(log, "{} Settings 'min_rows_for_wide_part'and 'min_bytes_for_wide_part' will be ignored.", reason);
#if !USE_ROCKSDB
if (use_metadata_cache)
@@ -2304,22 +2301,6 @@ void MergeTreeData::removePartsFinally(const MergeTreeData::DataPartsVector & pa
}
}
void MergeTreeData::flushAllInMemoryPartsIfNeeded()
{
if (getSettings()->in_memory_parts_enable_wal)
return;
auto metadata_snapshot = getInMemoryMetadataPtr();
DataPartsVector parts = getDataPartsVectorForInternalUsage();
for (const auto & part : parts)
{
if (auto part_in_memory = asInMemoryPart(part))
{
part_in_memory->flushToDisk(part_in_memory->getDataPartStorage().getPartDirectory(), metadata_snapshot);
}
}
}
size_t MergeTreeData::clearOldPartsFromFilesystem(bool force)
{
DataPartsVector parts_to_remove = grabOldParts(force);
@@ -3290,7 +3271,7 @@ void MergeTreeData::checkMutationIsPossible(const MutationCommands & /*commands*
/// Some validation will be added
}
MergeTreeDataPartFormat MergeTreeData::choosePartFormat(size_t bytes_uncompressed, size_t rows_count, bool only_on_disk) const
MergeTreeDataPartFormat MergeTreeData::choosePartFormat(size_t bytes_uncompressed, size_t rows_count) const
{
using PartType = MergeTreeDataPartType;
using PartStorageType = MergeTreeDataPartStorageType;
@@ -3304,9 +3285,6 @@ MergeTreeDataPartFormat MergeTreeData::choosePartFormat(size_t bytes_uncompresse
return bytes_uncompressed < min_bytes_for || rows_count < min_rows_for;
};
if (!only_on_disk && satisfies(settings->min_bytes_for_compact_part, settings->min_rows_for_compact_part))
return {PartType::InMemory, PartStorageType::Full};
auto part_type = PartType::Wide;
if (satisfies(settings->min_bytes_for_wide_part, settings->min_rows_for_wide_part))
part_type = PartType::Compact;
@@ -3316,7 +3294,7 @@ MergeTreeDataPartFormat MergeTreeData::choosePartFormat(size_t bytes_uncompresse
MergeTreeDataPartFormat MergeTreeData::choosePartFormatOnDisk(size_t bytes_uncompressed, size_t rows_count) const
{
return choosePartFormat(bytes_uncompressed, rows_count, true);
return choosePartFormat(bytes_uncompressed, rows_count);
}
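With the InMemory branch gone, format selection is a single threshold check: a part below either wide-part threshold becomes Compact, anything else Wide (the satisfies lambda uses a logical OR, so a threshold of 0 disables that criterion). A hedged SQL illustration with arbitrary thresholds:

CREATE TABLE t_fmt (k UInt64) ENGINE = MergeTree ORDER BY k
SETTINGS min_rows_for_wide_part = 10, min_bytes_for_wide_part = 0;

INSERT INTO t_fmt SELECT number FROM numbers(5);  -- 5 rows < 10, Compact
INSERT INTO t_fmt SELECT number FROM numbers(20); -- 20 rows >= 10, Wide

SELECT name, part_type, rows FROM system.parts
WHERE database = currentDatabase() AND table = 't_fmt' AND active;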
MergeTreeDataPartBuilder MergeTreeData::getDataPartBuilder(
@@ -6049,19 +6027,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
}
}
MergeTreeData::WriteAheadLogPtr wal;
auto get_inited_wal = [&] ()
{
if (!wal)
wal = data.getWriteAheadLog();
return wal;
};
if (settings->in_memory_parts_enable_wal)
for (const auto & part : precommitted_parts)
if (auto part_in_memory = asInMemoryPart(part))
get_inited_wal()->addPart(part_in_memory);
NOEXCEPT_SCOPE({
auto current_time = time(nullptr);
@@ -6105,10 +6070,6 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
data.modifyPartState(covered_part, DataPartState::Outdated);
data.removePartContributionToColumnAndSecondaryIndexSizes(covered_part);
if (settings->in_memory_parts_enable_wal)
if (isInMemoryPart(covered_part))
get_inited_wal()->dropPart(covered_part->name);
}
reduce_parts += covered_parts.size();
@@ -7789,11 +7750,8 @@ bool MergeTreeData::canUsePolymorphicParts(const MergeTreeSettings & settings, S
"Table can't create parts with adaptive granularity, but settings"
" min_rows_for_wide_part = {}"
", min_bytes_for_wide_part = {}"
", min_rows_for_compact_part = {}"
", min_bytes_for_compact_part = {}"
". Parts with non-adaptive granularity can be stored only in Wide (default) format.",
settings.min_rows_for_wide_part, settings.min_bytes_for_wide_part,
settings.min_rows_for_compact_part, settings.min_bytes_for_compact_part);
settings.min_rows_for_wide_part, settings.min_bytes_for_wide_part);
}
return false;
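The shortened message above concerns tables with non-adaptive granularity, which can store parts only in Wide format, so the remaining wide-part thresholds are ignored for them. A hedged sketch (the warning goes to the server log, not the query result):

CREATE TABLE t_nonadaptive (k UInt64) ENGINE = MergeTree ORDER BY k
SETTINGS index_granularity_bytes = 0,  -- non-adaptive granularity
         min_rows_for_wide_part = 512; -- ignored, a warning is logged

INSERT INTO t_nonadaptive SELECT number FROM numbers(10);

SELECT part_type FROM system.parts
WHERE database = currentDatabase() AND table = 't_nonadaptive' AND active;
-- expected: Wide, despite being under the row threshold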
@@ -8173,9 +8131,6 @@ void MergeTreeData::incrementInsertedPartsProfileEvent(MergeTreeDataPartType typ
case MergeTreeDataPartType::Compact:
ProfileEvents::increment(ProfileEvents::InsertedCompactParts);
break;
case MergeTreeDataPartType::InMemory:
ProfileEvents::increment(ProfileEvents::InsertedInMemoryParts);
break;
default:
break;
}
@@ -8191,9 +8146,6 @@ void MergeTreeData::incrementMergedPartsProfileEvent(MergeTreeDataPartType type)
case MergeTreeDataPartType::Compact:
ProfileEvents::increment(ProfileEvents::MergedIntoCompactParts);
break;
case MergeTreeDataPartType::InMemory:
ProfileEvents::increment(ProfileEvents::MergedIntoInMemoryParts);
break;
default:
break;
}

View File

@@ -225,7 +225,7 @@ public:
using OperationDataPartsLock = std::unique_lock<std::mutex>;
OperationDataPartsLock lockOperationsWithParts() const { return OperationDataPartsLock(operation_with_data_parts_mutex); }
MergeTreeDataPartFormat choosePartFormat(size_t bytes_uncompressed, size_t rows_count, bool only_on_disk = false) const;
MergeTreeDataPartFormat choosePartFormat(size_t bytes_uncompressed, size_t rows_count) const;
MergeTreeDataPartFormat choosePartFormatOnDisk(size_t bytes_uncompressed, size_t rows_count) const;
MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir) const;
@@ -651,9 +651,6 @@ public:
/// Removes parts from data_parts, they should be in Deleting state
void removePartsFinally(const DataPartsVector & parts);
/// When WAL is not enabled, the InMemoryParts need to be persistent.
void flushAllInMemoryPartsIfNeeded();
/// Delete irrelevant parts from memory and disk.
/// If 'force' - don't wait for old_parts_lifetime.
size_t clearOldPartsFromFilesystem(bool force = false);

View File

@@ -44,7 +44,7 @@ public:
/// Data of all columns is stored in one file. Marks are also stored in single file.
Compact,
/// Format with buffering data in RAM.
/// Format with buffering data in RAM. Obsolete - new parts cannot be created in this format.
InMemory,
Unknown,
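The enum value is retained only so that parts written by older servers remain readable. A hedged way to locate such leftovers and fold them into an on-disk format (assuming merges now always pick Compact or Wide, per the choosePartFormat change above):

SELECT database, table, name FROM system.parts
WHERE part_type = 'InMemory' AND active;

-- OPTIMIZE TABLE db.tbl FINAL; -- illustrative: merging rewrites the data
-- into a Compact or Wide part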

View File

@@ -33,10 +33,6 @@ struct Settings;
/** Data storing format settings. */ \
M(UInt64, min_bytes_for_wide_part, 10485760, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \
M(UInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \
M(UInt64, min_bytes_for_compact_part, 0, "Experimental. Minimal uncompressed size in bytes to create part in compact format instead of saving it in RAM", 0) \
M(UInt64, min_rows_for_compact_part, 0, "Experimental. Minimal number of rows to create part in compact format instead of saving it in RAM", 0) \
M(Bool, in_memory_parts_enable_wal, true, "Whether to write blocks in Native format to write-ahead-log before creation in-memory part", 0) \
M(UInt64, write_ahead_log_max_bytes, 1024 * 1024 * 1024, "Rotate WAL, if it exceeds that amount of bytes", 0) \
M(Float, ratio_of_defaults_for_sparse_serialization, 1.0, "Minimal ratio of number of default values to number of all values in column to store it in sparse serializations. If >= 1, columns will be always written in full serialization.", 0) \
\
/** Merge settings. */ \
@@ -59,9 +55,6 @@ struct Settings;
M(UInt64, min_compressed_bytes_to_fsync_after_fetch, 0, "Minimal number of compressed bytes to do fsync for part after fetch (0 - disabled)", 0) \
M(Bool, fsync_after_insert, false, "Do fsync for every inserted part. Significantly decreases performance of inserts, not recommended to use with wide parts.", 0) \
M(Bool, fsync_part_directory, false, "Do fsync for part directory after all part operations (writes, renames, etc.).", 0) \
M(UInt64, write_ahead_log_bytes_to_fsync, 100ULL * 1024 * 1024, "Amount of bytes, accumulated in WAL to do fsync.", 0) \
M(UInt64, write_ahead_log_interval_ms_to_fsync, 100, "Interval in milliseconds after which fsync for WAL is being done.", 0) \
M(Bool, in_memory_parts_insert_sync, false, "If true insert of part with in-memory format will wait for fsync of WAL", 0) \
M(UInt64, non_replicated_deduplication_window, 0, "How many last blocks of hashes should be kept on disk (0 - disabled).", 0) \
M(UInt64, max_parts_to_merge_at_once, 100, "Max amount of parts which can be merged at once (0 - disabled). Doesn't affect OPTIMIZE FINAL query.", 0) \
M(UInt64, merge_selecting_sleep_ms, 5000, "Sleep time for merge selecting when no part selected, a lower setting will trigger selecting tasks in background_schedule_pool frequently which result in large amount of requests to zookeeper in large-scale clusters", 0) \
@@ -183,7 +176,14 @@ struct Settings;
M(UInt64, replicated_max_parallel_sends_for_table, 0, "Obsolete setting, does nothing.", 0) \
M(UInt64, replicated_max_parallel_fetches, 0, "Obsolete setting, does nothing.", 0) \
M(UInt64, replicated_max_parallel_fetches_for_table, 0, "Obsolete setting, does nothing.", 0) \
M(Bool, write_final_mark, true, "Obsolete setting, does nothing.", 0) \
M(Bool, write_final_mark, true, "Obsolete setting, does nothing.", 0) \
M(UInt64, min_bytes_for_compact_part, 0, "Obsolete setting, does nothing.", 0) \
M(UInt64, min_rows_for_compact_part, 0, "Obsolete setting, does nothing.", 0) \
M(Bool, in_memory_parts_enable_wal, true, "Obsolete setting, does nothing.", 0) \
M(UInt64, write_ahead_log_max_bytes, 1024 * 1024 * 1024, "Obsolete setting, does nothing.", 0) \
M(UInt64, write_ahead_log_bytes_to_fsync, 100ULL * 1024 * 1024, "Obsolete setting, does nothing.", 0) \
M(UInt64, write_ahead_log_interval_ms_to_fsync, 100, "Obsolete setting, does nothing.", 0) \
M(Bool, in_memory_parts_insert_sync, false, "Obsolete setting, does nothing.", 0) \
/// Settings that should not change after the creation of a table.
/// NOLINTNEXTLINE
#define APPLY_FOR_IMMUTABLE_MERGE_TREE_SETTINGS(M) \
@@ -211,8 +211,7 @@ struct MergeTreeSettings : public BaseSettings<MergeTreeSettingsTraits>
static bool isPartFormatSetting(const String & name)
{
return name == "min_bytes_for_wide_part" || name == "min_rows_for_wide_part"
|| name == "min_bytes_for_compact_part" || name == "min_rows_for_compact_part";
return name == "min_bytes_for_wide_part" || name == "min_rows_for_wide_part";
}
/// Check that the values are sane taking also query-level settings into account.
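The settings moved into the obsolete block keep their names and defaults so that old CREATE TABLE statements and server configs still load, while isPartFormatSetting now recognizes only the wide-part pair. A hedged probe via system.merge_tree_settings:

SELECT name, value, description FROM system.merge_tree_settings
WHERE name IN ('min_rows_for_compact_part', 'min_bytes_for_compact_part',
               'in_memory_parts_enable_wal', 'in_memory_parts_insert_sync');
-- expected description after this commit: 'Obsolete setting, does nothing.'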

View File

@@ -84,31 +84,6 @@ void MergeTreeWriteAheadLog::init()
bytes_at_last_sync = 0;
}
void MergeTreeWriteAheadLog::addPart(DataPartInMemoryPtr & part)
{
std::unique_lock lock(write_mutex);
auto part_info = MergeTreePartInfo::fromPartName(part->name, storage.format_version);
min_block_number = std::min(min_block_number, part_info.min_block);
max_block_number = std::max(max_block_number, part_info.max_block);
writeIntBinary(WAL_VERSION, *out);
ActionMetadata metadata{};
metadata.part_uuid = part->uuid;
metadata.write(*out);
writeIntBinary(static_cast<UInt8>(ActionType::ADD_PART), *out);
writeStringBinary(part->name, *out);
block_out->write(part->block);
block_out->flush();
sync(lock);
auto max_wal_bytes = storage.getSettings()->write_ahead_log_max_bytes;
if (out->count() > max_wal_bytes)
rotate(lock);
}
void MergeTreeWriteAheadLog::dropPart(const String & part_name)
{
std::unique_lock lock(write_mutex);
@@ -121,7 +96,6 @@ void MergeTreeWriteAheadLog::dropPart(const String & part_name)
writeIntBinary(static_cast<UInt8>(ActionType::DROP_PART), *out);
writeStringBinary(part_name, *out);
out->next();
sync(lock);
}
void MergeTreeWriteAheadLog::rotate(const std::unique_lock<std::mutex> &)
@@ -269,27 +243,6 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore(
return result;
}
void MergeTreeWriteAheadLog::sync(std::unique_lock<std::mutex> & lock)
{
size_t bytes_to_sync = storage.getSettings()->write_ahead_log_bytes_to_fsync;
time_t time_to_sync = storage.getSettings()->write_ahead_log_interval_ms_to_fsync;
size_t current_bytes = out->count();
if (bytes_to_sync && current_bytes - bytes_at_last_sync > bytes_to_sync)
{
sync_task->schedule();
bytes_at_last_sync = current_bytes;
}
else if (time_to_sync && !sync_scheduled)
{
sync_task->scheduleAfter(time_to_sync);
sync_scheduled = true;
}
if (storage.getSettings()->in_memory_parts_insert_sync)
sync_cv.wait(lock, [this] { return !sync_scheduled; });
}
void MergeTreeWriteAheadLog::shutdown()
{
{

View File

@@ -167,14 +167,6 @@ void StorageMergeTree::startup()
}
}
void StorageMergeTree::flush()
{
if (flush_called.exchange(true))
return;
flushAllInMemoryPartsIfNeeded();
}
void StorageMergeTree::shutdown()
{
if (shutdown_called.exchange(true))

View File

@@ -45,7 +45,6 @@ public:
bool has_force_restore_data_flag);
void startup() override;
void flush() override;
void shutdown() override;
~StorageMergeTree() override;

View File

@@ -4421,14 +4421,6 @@ void StorageReplicatedMergeTree::startupImpl(bool from_attach_thread)
}
}
void StorageReplicatedMergeTree::flush()
{
if (flush_called.exchange(true))
return;
flushAllInMemoryPartsIfNeeded();
}
void StorageReplicatedMergeTree::partialShutdown()
{

View File

@@ -113,7 +113,6 @@ public:
void startup() override;
void shutdown() override;
void partialShutdown();
void flush() override;
~StorageReplicatedMergeTree() override;
static String getDefaultZooKeeperPath(const Poco::Util::AbstractConfiguration & config);

View File

@@ -171,39 +171,3 @@ def test_replace_partition(started_cluster):
assert TSV(res) == expected
instance.query("DROP TABLE IF EXISTS test.tbl3")
def test_freeze_in_memory(started_cluster):
instance.query(
"CREATE TABLE test.t_in_memory(a UInt32, s String) ENGINE = MergeTree ORDER BY a SETTINGS min_rows_for_compact_part = 1000"
)
instance.query("INSERT INTO test.t_in_memory VALUES (1, 'a')")
instance.query("ALTER TABLE test.t_in_memory FREEZE")
fp_backup = get_last_backup_path(
started_cluster.instances["node"], "test", "t_in_memory"
)
part_path = fp_backup + "/all_1_1_0/"
assert TSV(
instance.query(
"SELECT part_type, is_frozen FROM system.parts WHERE database = 'test' AND table = 't_in_memory'"
)
) == TSV("InMemory\t1\n")
instance.exec_in_container(["test", "-f", part_path + "/data.bin"])
assert instance.exec_in_container(["cat", part_path + "/count.txt"]).strip() == "1"
instance.query(
"CREATE TABLE test.t_in_memory_2(a UInt32, s String) ENGINE = MergeTree ORDER BY a"
)
copy_backup_to_detached(
started_cluster.instances["node"], "test", "t_in_memory", "t_in_memory_2"
)
instance.query("ALTER TABLE test.t_in_memory_2 ATTACH PARTITION ID 'all'")
assert TSV(
instance.query(
"SELECT part_type FROM system.parts WHERE database = 'test' AND table = 't_in_memory_2'"
)
) == TSV("Compact\n")
assert TSV(instance.query("SELECT a, s FROM test.t_in_memory_2")) == TSV("1\ta\n")

View File

@@ -49,9 +49,8 @@ def create_tables(name, nodes, node_settings, shard):
PARTITION BY toYYYYMM(date)
ORDER BY id
SETTINGS index_granularity = 64, index_granularity_bytes = {index_granularity_bytes},
min_rows_for_wide_part = {min_rows_for_wide_part}, min_rows_for_compact_part = {min_rows_for_compact_part},
min_bytes_for_wide_part = 0, min_bytes_for_compact_part = 0,
in_memory_parts_enable_wal = 1
min_rows_for_wide_part = {min_rows_for_wide_part},
min_bytes_for_wide_part = 0
""".format(
name=name, shard=shard, repl=i, **settings
)
@@ -87,17 +86,14 @@ node2 = cluster.add_instance(
settings_default = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 512,
"min_rows_for_compact_part": 0,
}
settings_compact_only = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 1000000,
"min_rows_for_compact_part": 0,
}
settings_not_adaptive = {
"index_granularity_bytes": 0,
"min_rows_for_wide_part": 512,
"min_rows_for_compact_part": 0,
}
node3 = cluster.add_instance(
@@ -116,12 +112,10 @@ node4 = cluster.add_instance(
settings_compact = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 512,
"min_rows_for_compact_part": 0,
}
settings_wide = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 0,
"min_rows_for_compact_part": 0,
}
node5 = cluster.add_instance(
@@ -131,12 +125,6 @@ node6 = cluster.add_instance(
"node6", main_configs=["configs/compact_parts.xml"], with_zookeeper=True
)
settings_in_memory = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 512,
"min_rows_for_compact_part": 256,
}
node9 = cluster.add_instance("node9", with_zookeeper=True, stay_alive=True)
node10 = cluster.add_instance("node10", with_zookeeper=True)
@@ -190,42 +178,6 @@ def start_cluster():
"shard2",
)
create_tables_old_format("polymorphic_table", [node5, node6], "shard3")
create_tables(
"in_memory_table",
[node9, node10],
[settings_in_memory, settings_in_memory],
"shard4",
)
create_tables(
"wal_table",
[node11, node12],
[settings_in_memory, settings_in_memory],
"shard4",
)
create_tables(
"restore_table",
[node11, node12],
[settings_in_memory, settings_in_memory],
"shard5",
)
create_tables(
"deduplication_table",
[node9, node10],
[settings_in_memory, settings_in_memory],
"shard5",
)
create_tables(
"sync_table",
[node9, node10],
[settings_in_memory, settings_in_memory],
"shard5",
)
create_tables(
"alters_table",
[node9, node10],
[settings_in_memory, settings_in_memory],
"shard5",
)
yield cluster
@@ -422,7 +374,6 @@ settings7 = {"index_granularity_bytes": 10485760}
settings8 = {
"index_granularity_bytes": 10485760,
"min_rows_for_wide_part": 512,
"min_rows_for_compact_part": 0,
}

View File

@@ -13,13 +13,6 @@
SAMPLE BY intHash32(UserID)
SETTINGS min_bytes_for_wide_part = '10M'
</create_query>
<create_query>
CREATE TABLE hits_memory AS hits_10m_single ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID)
SETTINGS min_bytes_for_compact_part = '1M', min_bytes_for_wide_part = '10M', in_memory_parts_enable_wal = 1
</create_query>
<create_query>
CREATE TABLE hits_buffer AS hits_10m_single
ENGINE = Buffer(default, hits_wide, 1, 0, 0, 10000, 10000, 0, 0)
@@ -38,6 +31,5 @@
<drop_query>DROP TABLE IF EXISTS hits_wide</drop_query>
<drop_query>DROP TABLE IF EXISTS hits_compact</drop_query>
<drop_query>DROP TABLE IF EXISTS hits_memory</drop_query>
<drop_query>DROP TABLE IF EXISTS hits_buffer</drop_query>
</test>

View File

@@ -1,39 +0,0 @@
system.parts
InMemory 2
1
1
Simple selects
0 0
1 1
2 2
3 0
4 1
50 2
51 0
52 1
53 2
54 0
34
0
20
10
Mutations and Alters
66
1 1
2 2
4 1
5 2
7 1
[1,1]
[]
[4,16]
[]
[7,49]
1 1
2 1
1 [1,1]
2 []
4 [4,16]
5 []
7 [7,49]
0

View File

@@ -1,48 +0,0 @@
-- Tags: no-s3-storage
DROP TABLE IF EXISTS in_memory;
CREATE TABLE in_memory (a UInt32, b UInt32)
ENGINE = MergeTree ORDER BY a
SETTINGS min_rows_for_compact_part = 1000, min_rows_for_wide_part = 1000;
INSERT INTO in_memory SELECT number, number % 3 FROM numbers(100);
SELECT 'system.parts';
SELECT DISTINCT part_type, marks FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory' AND active;
SELECT DISTINCT data_uncompressed_bytes > 0 FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory' AND active;
SELECT DISTINCT column_data_uncompressed_bytes > 0 FROM system.parts_columns WHERE database = currentDatabase() AND table = 'in_memory' AND active;
SELECT 'Simple selects';
SELECT * FROM in_memory ORDER BY a LIMIT 5;
SELECT * FROM in_memory ORDER BY a LIMIT 5 OFFSET 50;
SELECT count() FROM in_memory WHERE b = 0 SETTINGS max_block_size = 10;
-- Check index
SELECT count() FROM in_memory WHERE a > 100 SETTINGS max_rows_to_read = 0, force_primary_key = 1;
SELECT count() FROM in_memory WHERE a >= 10 AND a < 30 SETTINGS force_primary_key = 1;
SELECT DISTINCT blockSize() FROM in_memory SETTINGS max_block_size = 10;
SELECT 'Mutations and Alters';
SET mutations_sync = 1;
ALTER TABLE in_memory DELETE WHERE b = 0;
SELECT count() FROM in_memory;
SELECT * FROM in_memory ORDER BY a LIMIT 5;
ALTER TABLE in_memory ADD COLUMN arr Array(UInt64);
ALTER TABLE in_memory UPDATE arr = [a, a * a] WHERE b = 1;
SELECT arr FROM in_memory ORDER BY a LIMIT 5;
ALTER TABLE in_memory MODIFY COLUMN b String;
ALTER TABLE in_memory RENAME COLUMN b to str;
SELECT DISTINCT str, length(str) FROM in_memory ORDER BY str;
ALTER TABLE in_memory DROP COLUMN str;
SELECT * FROM in_memory ORDER BY a LIMIT 5;
-- in-memory parts work if they're empty.
ALTER TABLE in_memory DELETE WHERE 1;
SELECT count() FROM in_memory;
DROP TABLE in_memory;

View File

@@ -1 +0,0 @@
201901_1_1_0 1

View File

@@ -1,13 +0,0 @@
-- Tags: no-s3-storage
-- Part of 00961_check_table test, but with in-memory parts
SET check_query_single_value_result = 0;
DROP TABLE IF EXISTS mt_table;
CREATE TABLE mt_table (d Date, key UInt64, data String) ENGINE = MergeTree() PARTITION BY toYYYYMM(d) ORDER BY key
SETTINGS min_rows_for_compact_part = 1000, min_rows_for_wide_part = 1000;
CHECK TABLE mt_table;
INSERT INTO mt_table VALUES (toDate('2019-01-02'), 1, 'Hello'), (toDate('2019-01-02'), 2, 'World');
CHECK TABLE mt_table;
DROP TABLE mt_table;

View File

@@ -1,21 +0,0 @@
-- Tags: no-s3-storage
-- Test 01266_default_prewhere_reqq, but with in-memory parts
DROP TABLE IF EXISTS t1;
CREATE TABLE t1
(
date Date,
s1 String,
s2 String
) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(date) ORDER BY (date, s1)
SETTINGS index_granularity = 8192, min_rows_for_compact_part = 1000, min_rows_for_wide_part = 1000;
set max_threads=1;
insert into t1 (date, s1,s2) values(today()-1,'aaa','bbb');
alter table t1 add column s3 String DEFAULT concat(s2,'_',s1);
-- insert into t1 (date, s1,s2) values(today(),'aaa2','bbb2');
select ignore(date), s3 from t1 where s2='bbb';
DROP TABLE t1;

View File

@@ -1,25 +0,0 @@
[0]
[0,0,0]
[0,0,0,0,0]
[0,0,0,0,0,0,0]
[0,0,0,0,0,0,0,0,0]
[0]
[0,0,0]
[0,0,0,0,0]
[0,0,0,0,0,0,0]
[0,0,0,0,0,0,0,0,0]
[0]
[0,0,0]
[0,0,0,0,0]
[0,0,0,0,0,0,0]
[0,0,0,0,0,0,0,0,0]
[0]
[0,2,4]
[0,2,4,6,8]
[0,2,4,6,8,10,12]
[0,2,4,6,8,10,12,14,16]
[0] [0]
[0,1,2] [0,2,4]
[0,1,2,3,4] [0,2,4,6,8]
[0,1,2,3,4,5,6] [0,2,4,6,8,10,12]
[0,1,2,3,4,5,6,7,8] [0,2,4,6,8,10,12,14,16]

View File

@@ -1,19 +0,0 @@
-- Tags: no-s3-storage
-- Test 00576_nested_and_prewhere, but with in-memory parts.
DROP TABLE IF EXISTS nested;
CREATE TABLE nested (x UInt64, filter UInt8, n Nested(a UInt64)) ENGINE = MergeTree ORDER BY x
SETTINGS min_rows_for_compact_part = 200000, min_rows_for_wide_part = 300000;
INSERT INTO nested SELECT number, number % 2, range(number % 10) FROM system.numbers LIMIT 100000;
ALTER TABLE nested ADD COLUMN n.b Array(UInt64);
SELECT DISTINCT n.b FROM nested PREWHERE filter;
SELECT DISTINCT n.b FROM nested PREWHERE filter SETTINGS max_block_size = 123;
SELECT DISTINCT n.b FROM nested PREWHERE filter SETTINGS max_block_size = 1234;
ALTER TABLE nested ADD COLUMN n.c Array(UInt64) DEFAULT arrayMap(x -> x * 2, n.a);
SELECT DISTINCT n.c FROM nested PREWHERE filter;
SELECT DISTINCT n.a, n.c FROM nested PREWHERE filter;
DROP TABLE nested;

View File

@@ -1,60 +0,0 @@
1 2 foo
1 3 bar
2 4 aa
2 5 bb
2 6 cc
3 7 qq
3 8 ww
3 9 ee
3 10 rr
1_1_1_0 InMemory 2
2_2_2_0 InMemory 3
3_3_3_0 InMemory 4
^ init ==================
2 4 aa
2 5 bb
2 6 cc
3 7 qq
3 8 ww
3 9 ee
3 10 rr
2_2_2_0 InMemory 3
3_3_3_0 InMemory 4
^ drop 1 ==================
3 7 qq
3 8 ww
3 9 ee
3 10 rr
3_3_3_0 InMemory 4
^ detach 2 ==================
2 4 aa
2 5 bb
2 6 cc
3 7 qq
3 8 ww
3 9 ee
3 10 rr
2_4_4_0 Compact 3
3_3_3_0 InMemory 4
^ attach 2 =================
2 4 aa
2 5 bb
2 6 cc
3 7 qq
3 8 ww
3 9 ee
3 10 rr
2_4_4_0 Compact 3
3_3_3_0 InMemory 4
^ detach attach ==================
2 4 aa
2 5 bb
2 6 cc
3 11 tt
3 12 yy
t2 2_4_4_0 Compact 3
t2 3_6_6_0 Compact 2
t3 3_1_1_0 InMemory 2
^ replace ==================
3_1_1_0 InMemory 1 2
^ freeze ==================

View File

@@ -1,59 +0,0 @@
-- Tags: no-parallel, no-s3-storage
DROP TABLE IF EXISTS t2;
CREATE TABLE t2(id UInt32, a UInt64, s String)
ENGINE = MergeTree ORDER BY a PARTITION BY id
SETTINGS min_rows_for_compact_part = 1000, min_rows_for_wide_part = 2000;
SYSTEM STOP MERGES t2;
INSERT INTO t2 VALUES (1, 2, 'foo'), (1, 3, 'bar');
INSERT INTO t2 VALUES (2, 4, 'aa'), (2, 5, 'bb'), (2, 6, 'cc');
INSERT INTO t2 VALUES (3, 7, 'qq'), (3, 8, 'ww'), (3, 9, 'ee'), (3, 10, 'rr');
SELECT * FROM t2 ORDER BY a;
SELECT name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ init ==================';
ALTER TABLE t2 DROP PARTITION 1;
SELECT * FROM t2 ORDER BY a;
SELECT name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ drop 1 ==================';
ALTER TABLE t2 DETACH PARTITION 2;
SELECT * FROM t2 ORDER BY a;
SELECT name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ detach 2 ==================';
ALTER TABLE t2 ATTACH PARTITION 2;
SELECT * FROM t2 ORDER BY a;
SELECT name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ attach 2 =================';
DETACH TABLE t2;
ATTACH TABLE t2;
SELECT * FROM t2 ORDER BY a;
SELECT name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ detach attach ==================';
DROP TABLE IF EXISTS t3;
CREATE TABLE t3(id UInt32, a UInt64, s String)
ENGINE = MergeTree ORDER BY a PARTITION BY id
SETTINGS min_rows_for_compact_part = 1000, min_rows_for_wide_part = 2000;
INSERT INTO t3 VALUES (3, 11, 'tt'), (3, 12, 'yy');
ALTER TABLE t2 REPLACE PARTITION 3 FROM t3;
SELECT * FROM t2 ORDER BY a;
SELECT table, name, part_type, rows FROM system.parts WHERE table = 't2' AND active AND database = currentDatabase() ORDER BY name;
SELECT table, name, part_type, rows FROM system.parts WHERE table = 't3' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ replace ==================';
ALTER TABLE t3 FREEZE PARTITION 3;
SELECT name, part_type, is_frozen, rows FROM system.parts WHERE table = 't3' AND active AND database = currentDatabase() ORDER BY name;
SELECT '^ freeze ==================';
DROP TABLE t2;
DROP TABLE t3;

View File

@@ -12,7 +12,6 @@ create_query="CREATE TABLE subcolumns(n Nullable(UInt32), a1 Array(UInt32),\
# "StripeLog"
declare -a ENGINES=("Log" "TinyLog" "Memory" \
"MergeTree ORDER BY tuple() SETTINGS min_bytes_for_compact_part='10M'" \
"MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part='10M'" \
"MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=0")

View File

@@ -8,9 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames0"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames50"
$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, min_rows_for_compact_part = 100000, min_rows_for_compact_part = 10000000, write_ahead_log_max_bytes = 1"
$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0"
$CLICKHOUSE_CLIENT --query "INSERT INTO table_for_renames0 SELECT number, toString(number) FROM numbers(1000)"

View File

@@ -35,7 +35,7 @@ $CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=1 --
# InMemory - [0..5]
# Compact - (5..10]
# Wide - >10
$CLICKHOUSE_CLIENT --query="CREATE TABLE data_01600 (part_type String, key Int) ENGINE = MergeTree PARTITION BY part_type ORDER BY key SETTINGS min_bytes_for_wide_part=0, min_bytes_for_compact_part=0, min_rows_for_wide_part=10, min_rows_for_compact_part=5"
$CLICKHOUSE_CLIENT --query="CREATE TABLE data_01600 (part_type String, key Int) ENGINE = MergeTree PARTITION BY part_type ORDER BY key SETTINGS min_bytes_for_wide_part=0, min_rows_for_wide_part=10"
# InMemory
$CLICKHOUSE_CLIENT --query="INSERT INTO data_01600 SELECT 'InMemory', number FROM system.numbers LIMIT 1"

View File

@@ -8,8 +8,6 @@ wide fsync_after_insert
1
wide fsync_after_insert,fsync_part_directory
1
memory in_memory_parts_insert_sync
1
wide fsync_part_directory,vertical
1
2

View File

@@ -37,13 +37,6 @@ select * from data_01643;
optimize table data_01643 final;
drop table data_01643;
select 'memory in_memory_parts_insert_sync';
create table data_01643 (key Int) engine=MergeTree() order by key settings min_rows_for_compact_part=2, in_memory_parts_insert_sync=1, fsync_after_insert=1, fsync_part_directory=1;
insert into data_01643 values (1);
select * from data_01643;
optimize table data_01643 final;
drop table data_01643;
select 'wide fsync_part_directory,vertical';
create table data_01643 (key Int) engine=MergeTree() order by key settings min_bytes_for_wide_part=0, fsync_part_directory=1, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0;
insert into data_01643 values (1);

View File

@@ -8,8 +8,6 @@ wide fsync_after_insert
1
wide fsync_after_insert,fsync_part_directory
1
memory in_memory_parts_insert_sync
1
wide fsync_part_directory,vertical
1
2

View File

@@ -63,17 +63,6 @@ system sync replica rep_fsync_r2;
drop table rep_fsync_r1;
drop table rep_fsync_r2;
select 'memory in_memory_parts_insert_sync';
create table rep_fsync_r1 (key Int) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/rep_fsync', 'r1') order by key settings min_rows_for_compact_part=2, in_memory_parts_insert_sync=1, fsync_after_insert=1, fsync_part_directory=1;
create table rep_fsync_r2 (key Int) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/rep_fsync', 'r2') order by key settings min_rows_for_compact_part=2, in_memory_parts_insert_sync=1, fsync_after_insert=1, fsync_part_directory=1;
insert into rep_fsync_r1 values (1);
system sync replica rep_fsync_r2;
select * from rep_fsync_r2;
optimize table rep_fsync_r1 final;
system sync replica rep_fsync_r2;
drop table rep_fsync_r1;
drop table rep_fsync_r2;
select 'wide fsync_part_directory,vertical';
create table rep_fsync_r1 (key Int) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/rep_fsync', 'r1') order by key settings min_bytes_for_wide_part=0, fsync_part_directory=1, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0;
create table rep_fsync_r2 (key Int) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/rep_fsync', 'r2') order by key settings min_bytes_for_wide_part=0, fsync_part_directory=1, enable_vertical_merge_algorithm=1, vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0;

View File

@@ -4,7 +4,6 @@ ReplacingMergeTree: OK
JSONEachRow: OK
clusterAllReplicas: OK
SimpleAggregateFunction: OK
write_ahead_log_interval_ms_to_fsync: OK
max_concurrent_queries_for_all_users: OK
test_shard_localhost: OK
default_path_test: OK

View File

@@ -89,8 +89,6 @@ client_compwords_positive=(
clusterAllReplicas
# system.data_type_families
SimpleAggregateFunction
# system.merge_tree_settings
write_ahead_log_interval_ms_to_fsync
# system.settings
max_concurrent_queries_for_all_users
# system.clusters

View File

@@ -10,8 +10,8 @@
3 3
4 4
0
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32, index_granularity = 8192
2
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n ),\n PROJECTION pp\n (\n SELECT \n x,\n count()\n GROUP BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32, index_granularity = 8192
CREATE TABLE default.tp_2\n(\n `x` Int32,\n `y` Int32,\n PROJECTION p\n (\n SELECT \n x,\n y\n ORDER BY x\n )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{shard}/01710_projection_fetch_default\', \'2_{replica}\')\nORDER BY y\nSETTINGS min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32, index_granularity = 8192

View File

@@ -3,9 +3,9 @@
drop table if exists tp_1;
drop table if exists tp_2;
create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '1_{replica}') order by y settings min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32;
create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '1_{replica}') order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32;
create table tp_2 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '2_{replica}') order by y settings min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32;
create table tp_2 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '2_{replica}') order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32;
insert into tp_1 select number, number from numbers(3);

View File

@@ -1,6 +1,6 @@
drop table if exists tp;
create table tp (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y settings min_rows_for_compact_part = 2, min_rows_for_wide_part = 4, min_bytes_for_compact_part = 16, min_bytes_for_wide_part = 32;
create table tp (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32;
insert into tp select number, number from numbers(3);
insert into tp select number, number from numbers(5);

View File

@@ -1,4 +0,0 @@
before DETACH TABLE
500
after DETACH TABLE
500

View File

@@ -1,26 +0,0 @@
DROP TABLE IF EXISTS mem_part_flush;
CREATE TABLE mem_part_flush
(
`key` UInt32,
`ts` DateTime,
`db_time` DateTime DEFAULT now()
)
ENGINE = MergeTree
ORDER BY (key, ts)
SETTINGS min_rows_for_compact_part = 1000000, min_bytes_for_compact_part = 200000000, in_memory_parts_enable_wal = 0;
INSERT INTO mem_part_flush(key, ts) SELECT number % 1000, now() + intDiv(number,1000) FROM numbers(500);
SELECT 'before DETACH TABLE';
SELECT count(*) FROM mem_part_flush;
DETACH TABLE mem_part_flush;
ATTACH TABLE mem_part_flush;
SELECT 'after DETACH TABLE';
SELECT count(*) FROM mem_part_flush;
DROP TABLE mem_part_flush;

View File

@@ -1,35 +0,0 @@
-- { echo }
DROP TABLE IF EXISTS in_memory;
CREATE TABLE in_memory (a UInt32) ENGINE = MergeTree ORDER BY a SETTINGS min_rows_for_compact_part = 1000, min_bytes_for_wide_part = 10485760;
INSERT INTO in_memory VALUES (1);
INSERT INTO in_memory VALUES (2);
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
all_1_1_0 1 InMemory
all_2_2_0 1 InMemory
SELECT * FROM in_memory ORDER BY a;
1
2
-- no WAL remove since parts are still in use
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
all_1_1_0 1 InMemory
all_2_2_0 1 InMemory
SELECT * FROM in_memory ORDER BY a;
1
2
-- WAL should be removed, since on disk part covers all parts in WAL
OPTIMIZE TABLE in_memory;
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
all_1_2_1 1 Compact
-- check that the WAL will be reinitialized after remove
INSERT INTO in_memory VALUES (3);
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT * FROM in_memory ORDER BY a;
1
2
3

View File

@@ -1,29 +0,0 @@
-- Tags: no-s3-storage
-- { echo }
DROP TABLE IF EXISTS in_memory;
CREATE TABLE in_memory (a UInt32) ENGINE = MergeTree ORDER BY a SETTINGS min_rows_for_compact_part = 1000, min_bytes_for_wide_part = 10485760;
INSERT INTO in_memory VALUES (1);
INSERT INTO in_memory VALUES (2);
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
SELECT * FROM in_memory ORDER BY a;
-- no WAL remove since parts are still in use
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
SELECT * FROM in_memory ORDER BY a;
-- WAL should be removed, since on disk part covers all parts in WAL
OPTIMIZE TABLE in_memory;
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT name, active, part_type FROM system.parts WHERE database = currentDatabase() AND table = 'in_memory';
-- check that the WAL will be reinitialized after remove
INSERT INTO in_memory VALUES (3);
DETACH TABLE in_memory;
ATTACH TABLE in_memory;
SELECT * FROM in_memory ORDER BY a;

View File

@@ -1,14 +0,0 @@
init state
30
0_1_1_0 InMemory 10 1
1_2_2_0 InMemory 10 1
2_3_3_0 InMemory 10 1
drop part 0
20
1_2_2_0 InMemory 10 1
2_3_3_0 InMemory 10 1
detach table
attach table
20
1_2_2_0 InMemory 10 1
2_3_3_0 InMemory 10 1

View File

@@ -1,40 +0,0 @@
-- Tags: no-s3-storage
DROP TABLE IF EXISTS table_in_memory;
CREATE TABLE table_in_memory
(
`id` UInt64,
`value` UInt64
)
ENGINE = MergeTree
PARTITION BY id
ORDER BY value
SETTINGS min_bytes_for_wide_part=1000, min_bytes_for_compact_part=900;
SELECT 'init state';
INSERT INTO table_in_memory SELECT intDiv(number, 10), number FROM numbers(30);
SELECT count() FROM table_in_memory;
SELECT name, part_type, rows, active from system.parts
WHERE table='table_in_memory' AND database=currentDatabase();
SELECT 'drop part 0';
ALTER TABLE table_in_memory DROP PARTITION 0;
SELECT count() FROM table_in_memory;
SELECT name, part_type, rows, active from system.parts
WHERE table='table_in_memory' AND database=currentDatabase() AND active;
SELECT 'detach table';
DETACH TABLE table_in_memory;
SELECT name, part_type, rows, active from system.parts
WHERE table='table_in_memory' AND database=currentDatabase();
SELECT 'attach table';
ATTACH TABLE table_in_memory;
SELECT count() FROM table_in_memory;
SELECT name, part_type, rows, active from system.parts
WHERE table='table_in_memory' AND database=currentDatabase() and active;

View File

@@ -10,26 +10,12 @@ CREATE TABLE data_compact
)
ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS min_rows_for_compact_part=0, min_bytes_for_compact_part=0, min_rows_for_wide_part=100, min_bytes_for_wide_part=1e9;
SETTINGS min_rows_for_wide_part=100, min_bytes_for_wide_part=1e9;
INSERT INTO data_compact VALUES ([0]);
ALTER TABLE data_compact ADD COLUMN root.nested_array Array(Array(UInt8));
SELECT table, part_type FROM system.parts WHERE table = 'data_compact' AND database = currentDatabase();
SELECT root.nested_array FROM data_compact;
-- memory
DROP TABLE IF EXISTS data_memory;
CREATE TABLE data_memory
(
`root.array` Array(UInt8),
)
ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS min_rows_for_compact_part=100, min_bytes_for_compact_part=1e9, min_rows_for_wide_part=100, min_bytes_for_wide_part=1e9, in_memory_parts_enable_wal=0;
INSERT INTO data_memory VALUES ([0]);
ALTER TABLE data_memory ADD COLUMN root.nested_array Array(Array(UInt8));
SELECT table, part_type FROM system.parts WHERE table = 'data_memory' AND database = currentDatabase();
SELECT root.nested_array FROM data_memory;
-- wide
DROP TABLE IF EXISTS data_wide;
CREATE TABLE data_wide
@@ -38,7 +24,7 @@ CREATE TABLE data_wide
)
ENGINE = MergeTree()
ORDER BY tuple()
SETTINGS min_rows_for_wide_part=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0, min_bytes_for_wide_part=0;
SETTINGS min_rows_for_wide_part=0, min_bytes_for_wide_part=0;
INSERT INTO data_wide VALUES ([0]);
ALTER TABLE data_wide ADD COLUMN root.nested_array Array(Array(UInt8));
SELECT table, part_type FROM system.parts WHERE table = 'data_wide' AND database = currentDatabase();

View File

@@ -32,11 +32,8 @@ DROP TABLE IF EXISTS times;
CREATE TABLE times (t DateTime) ENGINE MergeTree ORDER BY t
SETTINGS
storage_policy='default',
min_rows_for_compact_part = 0,
min_bytes_for_compact_part = 0,
min_rows_for_wide_part = 1000000,
min_bytes_for_wide_part = 1000000,
in_memory_parts_enable_wal = 0,
ratio_of_defaults_for_sparse_serialization=1.0;
"
@@ -74,4 +71,3 @@ AND ( query LIKE '%SELECT % FROM times%' OR query LIKE '%INSERT INTO times%' )
AND type = 'QueryFinish'
ORDER BY query_start_time_microseconds ASC, query DESC;
"

View File

@@ -1,15 +0,0 @@
-- Tags: no-s3-storage
DROP TABLE IF EXISTS inmemory_test;
CREATE TABLE inmemory_test (d Date, id String)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/inmemory_test', 'r1')
PARTITION BY toYYYYMMDD(d) ORDER BY (d, id)
SETTINGS min_rows_for_compact_part = 10, index_granularity = 8192;
INSERT INTO inmemory_test(d, id) VALUES('2023-01-01', 'abcdefghijklmnopqrstuvwxyz');
INSERT INTO inmemory_test(d, id) VALUES('2023-01-01', 'a1234567890123456789012345');
SELECT COUNT(1) FROM inmemory_test;
DROP TABLE inmemory_test;

View File

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, min_bytes_for_compact_part=0;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
# reading 1e6*8 bytes with 1M bandwidth it should take (8-1)/1=7 seconds

View File

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, min_bytes_for_compact_part=0;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
query_id=$(random_str 10)

View File

@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9, min_bytes_for_compact_part=0;
create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9;
"
# reading 1e6*8 bytes with 1M bandwidth it should take (8-1)/1=7 seconds

View File

@@ -23,7 +23,6 @@ if (NOT DEFINED ENABLE_UTILS OR ENABLE_UTILS)
add_subdirectory (wikistat-loader)
add_subdirectory (check-marks)
add_subdirectory (checksum-for-compressed-block)
add_subdirectory (wal-dump)
add_subdirectory (check-mysql-binlog)
add_subdirectory (keeper-bench)

View File

@@ -1,2 +0,0 @@
clickhouse_add_executable (wal-dump main.cpp)
target_link_libraries(wal-dump PRIVATE dbms boost::program_options)

View File

@@ -1,78 +0,0 @@
#include <iostream>
#include <boost/program_options.hpp>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Formats/NativeReader.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <Storages/MergeTree/MergeTreeWriteAheadLog.h>
/*
* Dump the Write Ahead Log file, outputs:
* Part 0, Version: 0, Action : ADD_PART, Name: 4_1_1_0, Block:
a Int32 Int32(size = 2), b Int32 Int32(size = 2), c Int32 Int32(size = 2)
*/
static void dump(const std::string & bin_path)
{
DB::ReadBufferFromFile in(bin_path);
DB::NativeReader block_in(in, 0);
DB::Block block;
DB::WriteBufferFromFileDescriptor out(STDOUT_FILENO);
for (size_t part_num = 0; !in.eof(); ++part_num)
{
UInt8 version;
String part_name;
DB::MergeTreeWriteAheadLog::ActionType action_type;
DB::readIntBinary(version, in);
DB::readIntBinary(action_type, in);
DB::readStringBinary(part_name, in);
block = block_in.read();
out << "Part " << part_num << ", Version: " << version
<< ", Action : " << (action_type == DB::MergeTreeWriteAheadLog::ActionType::ADD_PART ? "ADD_PART" : "DROP_PART")
<< ", Name: " << part_name << ", Block:\n";
out << block.dumpStructure() << "\n";
out << "\n" << DB::flush;
}
}
int main(int argc, char ** argv)
{
boost::program_options::options_description desc("Allowed options");
desc.add_options()("help,h", "produce help message");
boost::program_options::variables_map options;
boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);
if (options.count("help") || argc != 2)
{
std::cout << "Usage: " << argv[0] << " wal.bin" << std::endl;
std::cout << desc << std::endl;
return 1;
}
try
{
dump(argv[1]);
}
catch (const DB::Exception & e)
{
std::cerr << e.what() << ", " << e.message() << std::endl
<< std::endl
<< "Stack trace:" << std::endl
<< e.getStackTraceString() << std::endl;
throw;
}
return 0;
}