Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)
Allow experimental features when recovering Replicated db replica (#53167)

* Allow experimental features when recovering Replicated db replica
* Automatic style fix

Co-authored-by: robot-clickhouse <robot-clickhouse@users.noreply.github.com>
parent 85af996d5c
commit 48ed54e822
@@ -818,6 +818,31 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
     query_context->setQueryKindReplicatedDatabaseInternal();
     query_context->setCurrentDatabase(getDatabaseName());
     query_context->setCurrentQueryId("");
+
+    /// We will execute some CREATE queries for recovery (not ATTACH queries),
+    /// so we need to allow experimental features that can be used in a CREATE query
+    query_context->setSetting("allow_experimental_inverted_index", 1);
+    query_context->setSetting("allow_experimental_codecs", 1);
+    query_context->setSetting("allow_experimental_live_view", 1);
+    query_context->setSetting("allow_experimental_window_view", 1);
+    query_context->setSetting("allow_experimental_funnel_functions", 1);
+    query_context->setSetting("allow_experimental_nlp_functions", 1);
+    query_context->setSetting("allow_experimental_hash_functions", 1);
+    query_context->setSetting("allow_experimental_object_type", 1);
+    query_context->setSetting("allow_experimental_annoy_index", 1);
+    query_context->setSetting("allow_experimental_bigint_types", 1);
+    query_context->setSetting("allow_experimental_window_functions", 1);
+    query_context->setSetting("allow_experimental_geo_types", 1);
+    query_context->setSetting("allow_experimental_map_type", 1);
+
+    query_context->setSetting("allow_suspicious_low_cardinality_types", 1);
+    query_context->setSetting("allow_suspicious_fixed_string_types", 1);
+    query_context->setSetting("allow_suspicious_indices", 1);
+    query_context->setSetting("allow_suspicious_codecs", 1);
+    query_context->setSetting("allow_hyperscan", 1);
+    query_context->setSetting("allow_simdjson", 1);
+    query_context->setSetting("allow_deprecated_syntax_for_merge_tree", 1);
+
     auto txn = std::make_shared<ZooKeeperMetadataTransaction>(current_zookeeper, zookeeper_path, false, "");
     query_context->initZooKeeperMetadataTransaction(txn);
     return query_context;
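The hunk above is the heart of the change: recoverLostReplica() replays lost tables with CREATE queries (not ATTACH), and a CREATE re-validates every feature gate, so a table that was legitimately created while an experimental flag was enabled would otherwise fail to come back on the recovering replica. Force-enabling the gates on the recovery context sidesteps that. Below is a minimal, self-contained C++ sketch of the pattern; QueryContext and validateCreate here are invented stand-ins for illustration, not the real ClickHouse API.

#include <iostream>
#include <map>
#include <set>
#include <stdexcept>
#include <string>

// Toy stand-in for a query context: just a bag of named integer settings.
struct QueryContext
{
    std::map<std::string, int> settings;
    void setSetting(const std::string & name, int value) { settings[name] = value; }
    bool isEnabled(const std::string & name) const
    {
        auto it = settings.find(name);
        return it != settings.end() && it->second != 0;
    }
};

// Hypothetical validator: a CREATE query that uses a gated feature is
// rejected unless the corresponding allow_* setting is enabled.
void validateCreate(const QueryContext & ctx, const std::set<std::string> & gates_used)
{
    for (const auto & gate : gates_used)
        if (!ctx.isEnabled(gate))
            throw std::runtime_error(gate + " is disabled, CREATE rejected");
}

int main()
{
    QueryContext recovery_ctx;
    // Mirrors what the patch does in recoverLostReplica(): enable the gates
    // up front, because recovery replays CREATE rather than ATTACH queries.
    recovery_ctx.setSetting("allow_experimental_object_type", 1);
    recovery_ctx.setSetting("allow_suspicious_codecs", 1);

    validateCreate(recovery_ctx, {"allow_experimental_object_type"});
    std::cout << "recovery CREATE accepted\n";
}

Listing each gate individually suggests there is no single "allow everything" switch to flip; every setting that a recoverable table definition might need has to be enabled one by one.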
@@ -98,7 +98,9 @@ void S3QueueFilesMetadata::S3QueueProcessedCollection::parse(const String & coll
 
 void S3QueueFilesMetadata::S3QueueProcessedCollection::add(const String & file_name)
 {
-    TrackedCollectionItem processed_file = { .file_path=file_name, .timestamp = getCurrentTime() };
+    TrackedCollectionItem processed_file;
+    processed_file.file_path = file_name;
+    processed_file.timestamp = getCurrentTime();
     files.push_back(processed_file);
 
     /// TODO: it is strange that in parse() we take into account only max_age, but here only max_size.
@@ -17,6 +17,9 @@ class S3QueueFilesMetadata
 public:
     struct TrackedCollectionItem
     {
+        TrackedCollectionItem() = default;
+        TrackedCollectionItem(const String & file_path_, UInt64 timestamp_, UInt64 retries_count_, const String & last_exception_)
+            : file_path(file_path_), timestamp(timestamp_), retries_count(retries_count_), last_exception(last_exception_) {}
         String file_path;
         UInt64 timestamp = 0;
         UInt64 retries_count = 0;
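The constructors added to TrackedCollectionItem in the header hunk above are what force the mechanical-looking edit in add(): a type with a user-declared constructor is no longer a C++ aggregate, so the designated-initializer form { .file_path = ..., .timestamp = ... } stops compiling, and the fields have to be assigned individually. A self-contained sketch of the language rule (type names invented for illustration):

#include <string>

// No user-declared constructors: this is an aggregate.
struct Aggregate
{
    std::string file_path;
    unsigned long timestamp = 0;
};

// User-declared constructors: no longer an aggregate.
struct NotAggregate
{
    NotAggregate() = default;
    NotAggregate(const std::string & p, unsigned long t) : file_path(p), timestamp(t) {}
    std::string file_path;
    unsigned long timestamp = 0;
};

int main()
{
    // Fine: designated initializers (C++20) work on aggregates.
    Aggregate a{.file_path = "f1", .timestamp = 42};

    // Would not compile: a user-declared constructor makes the type a
    // non-aggregate, which is why the .cpp switched to member assignment.
    // NotAggregate b{.file_path = "f1", .timestamp = 42};

    NotAggregate c;              // use the default constructor instead,
    c.file_path = a.file_path;   // then assign members one by one,
    c.timestamp = a.timestamp;   // exactly as the patched add() now does.
    return 0;
}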
@@ -4,6 +4,8 @@
             <allow_drop_detached>1</allow_drop_detached>
             <allow_experimental_database_replicated>1</allow_experimental_database_replicated>
             <allow_experimental_alter_materialized_view_structure>1</allow_experimental_alter_materialized_view_structure>
+            <allow_experimental_object_type>0</allow_experimental_object_type>
+            <allow_suspicious_codecs>0</allow_suspicious_codecs>
         </default>
     </profiles>
     <users>
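The configuration hunk above (evidently a test user profile) switches allow_experimental_object_type and allow_suspicious_codecs off by default, so the integration-test changes below exercise the interesting case: recovery must succeed even when the user's profile forbids these features, which works because settings applied directly to the recovery query context take precedence over profile defaults. A rough sketch of that two-layer lookup, with hypothetical names:

#include <iostream>
#include <map>
#include <string>

// Illustrative two-layer settings resolution: a profile default can be
// overridden per query, which is what the recovery context relies on.
// (Names are invented; the real settings machinery is more involved.)
struct SettingsLayers
{
    std::map<std::string, int> profile;         // e.g. from the <default> profile
    std::map<std::string, int> query_override;  // e.g. from setSetting() on the context

    int effective(const std::string & name, int hard_default = 0) const
    {
        if (auto it = query_override.find(name); it != query_override.end())
            return it->second;
        if (auto it = profile.find(name); it != profile.end())
            return it->second;
        return hard_default;
    }
};

int main()
{
    SettingsLayers s;
    s.profile["allow_experimental_object_type"] = 0;         // disabled in the profile
    s.query_override["allow_experimental_object_type"] = 1;  // forced by recovery

    std::cout << s.effective("allow_experimental_object_type") << "\n"; // prints 1
}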
@@ -672,7 +672,11 @@ def test_alters_from_different_replicas(started_cluster):
 
 
 def create_some_tables(db):
-    settings = {"distributed_ddl_task_timeout": 0}
+    settings = {
+        "distributed_ddl_task_timeout": 0,
+        "allow_experimental_object_type": 1,
+        "allow_suspicious_codecs": 1,
+    }
     main_node.query(f"CREATE TABLE {db}.t1 (n int) ENGINE=Memory", settings=settings)
     dummy_node.query(
         f"CREATE TABLE {db}.t2 (s String) ENGINE=Memory", settings=settings
@@ -690,11 +694,11 @@ def create_some_tables(db):
         settings=settings,
     )
     dummy_node.query(
-        f"CREATE TABLE {db}.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n",
+        f"CREATE TABLE {db}.rmt2 (n int CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12))) ENGINE=ReplicatedMergeTree order by n",
         settings=settings,
     )
     main_node.query(
-        f"CREATE TABLE {db}.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n",
+        f"CREATE TABLE {db}.rmt3 (n int, json Object('json') materialized '') ENGINE=ReplicatedMergeTree order by n",
         settings=settings,
     )
     dummy_node.query(
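With the hunk above, rmt2 and rmt3 are defined with a deliberately redundant codec chain and an experimental Object('json') column, i.e. exactly the kind of definitions that can only be created, and therefore only recovered, with the corresponding allow_* gates open. As a rough illustration of why CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)) is treated as suspicious, here is a toy check that flags a chain stacking more than one general-purpose compressor; ClickHouse's real validation is more nuanced:

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Toy heuristic in the spirit of allow_suspicious_codecs: stacking several
// generic compression codecs is almost certainly redundant, so reject the
// chain unless the user has explicitly opted in.
bool isSuspiciousChain(const std::vector<std::string> & codecs)
{
    const std::set<std::string> generic = {"ZSTD", "LZ4", "LZ4HC"};
    int generic_count = 0;
    for (const auto & c : codecs)
        if (generic.count(c.substr(0, c.find('('))))  // strip "(...)" arguments
            ++generic_count;
    return generic_count > 1;
}

int main()
{
    const std::vector<std::string> chain = {"ZSTD", "ZSTD", "ZSTD(12)", "LZ4HC(12)"};
    const bool allow_suspicious_codecs = true;  // recovery forces this on
    if (isSuspiciousChain(chain) && !allow_suspicious_codecs)
        std::cout << "rejected: suspicious codec chain\n";
    else
        std::cout << "accepted\n";
}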
@@ -868,7 +872,10 @@ def test_recover_staled_replica(started_cluster):
     ]:
         assert main_node.query(f"SELECT (*,).1 FROM recover.{table}") == "42\n"
     for table in ["t2", "rmt1", "rmt2", "rmt4", "d1", "d2", "mt2", "mv1", "mv3"]:
-        assert dummy_node.query(f"SELECT (*,).1 FROM recover.{table}") == "42\n"
+        assert (
+            dummy_node.query(f"SELECT '{table}', (*,).1 FROM recover.{table}")
+            == f"{table}\t42\n"
+        )
     for table in ["m1", "mt1"]:
         assert dummy_node.query(f"SELECT count() FROM recover.{table}") == "0\n"
     global test_recover_staled_replica_run
@@ -24,6 +24,11 @@ system flush logs;
 select * from system.zookeeper_log where path like '/test/02439/s1/' || currentDatabase() || '/block_numbers/%'
     and op_num in ('List', 'SimpleList', 'FilteredList')
     and path not like '%/block_numbers/1' and path not like '%/block_numbers/123'
-    and event_time >= now() - interval 1 minute;
+    and event_time >= now() - interval 1 minute
+    -- avoid race with tests like 02311_system_zookeeper_insert
+    and (query_id is null or query_id='' or query_id in
+        (select query_id from system.query_log
+         where event_time >= now() - interval 1 minute and current_database=currentDatabase())
+        );
 
 drop table rmt;