mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-25 09:02:00 +00:00
build and test fixes
This commit is contained in:
parent
e26e873b0d
commit
fc06f99476
@@ -502,7 +502,7 @@ void DDLWorker::parseQueryAndResolveHost(DDLTask & task)
     else /// circular replication is used.
     {
         is_circular_replicated = true;
-        auto query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(task.query.get());
+        auto * query_with_table = dynamic_cast<ASTQueryWithTableAndOutput *>(task.query.get());
         if (query_with_table == nullptr || query_with_table->database.empty())
         {
             throw Exception(
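For readers skimming the first hunk: the functional point is not the `auto` to `auto *` style fix but the branch it sits in. Under circular (cross) replication each server hosts replicas of several shards in separate databases, so an ON CLUSTER DDL statement must name its database explicitly; `query_with_table->database.empty()` is exactly the case the worker rejects. A minimal sketch of both cases, assuming the integration-test harness used later in this commit (node1 and the cross_3shards_2replicas cluster come from that test; the table `t` and its schema are illustrative):

    # Qualified DDL: DDLWorker can resolve which local database/replica
    # on each host should run the statement.
    node1.query(
        "CREATE TABLE replica_1.t ON CLUSTER cross_3shards_2replicas "
        "(id UInt32) "
        "ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/t', '{replica}') "
        "ORDER BY id")

    # Unqualified DDL: query_with_table->database is empty, so the branch
    # patched above throws instead of guessing a database.
    # node1.query("CREATE TABLE t ON CLUSTER cross_3shards_2replicas (id UInt32) ...")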
@@ -4378,10 +4378,10 @@ void StorageReplicatedMergeTree::sendRequestToLeaderReplica(const ASTPtr & query

     /// SECONDARY_QUERY here means that we received the query from DDLWorker;
     /// there is no sense in sending it to the leader, because the leader will receive it from its own DDLWorker.
-    // if (query_context.getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
-    // {
-    //     throw Exception("Cannot execute DDL query, because leader was suddenly changed or logical error.", ErrorCodes::LEADERSHIP_CHANGED);
-    // }
+    if (query_context.getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
+    {
+        throw Exception("Cannot execute DDL query, because leader was suddenly changed or logical error.", ErrorCodes::LEADERSHIP_CHANGED);
+    }

     ReplicatedMergeTreeAddress leader_address(getZooKeeper()->get(zookeeper_path + "/replicas/" + leader + "/host"));
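The second hunk re-enables a guard that had been commented out. SECONDARY_QUERY marks a statement that a replica received from its own DDLWorker; forwarding it to the leader would be pointless, since the leader gets the same statement from its own DDLWorker. Reaching this code path therefore means the leader changed mid-query or something is logically wrong, and the server now throws LEADERSHIP_CHANGED again. A client driving ON CLUSTER DDL can treat that error as retryable; a minimal sketch, assuming the same test harness (matching the exception by message text and the retry policy itself are assumptions of this sketch, not part of the commit):

    import time

    def query_with_leader_retry(node, sql, attempts=3, delay=0.5):
        # Retry DDL that failed because the leader changed under us.
        for attempt in range(attempts):
            try:
                return node.query(sql)
            except Exception as e:  # the harness raises on any server error
                if "LEADERSHIP_CHANGED" not in str(e) or attempt == attempts - 1:
                    raise
                time.sleep(delay)  # give the cluster time to elect a new leader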
@@ -18,21 +18,27 @@ def started_cluster():
         cluster.start()

         node1.query('''
-    CREATE DATABASE replica_1 on cluster cross_3shards_2replicas;
-    CREATE DATABASE replica_2 on cluster cross_3shards_2replicas;
-
-    CREATE TABLE replica_1.replicated_local on cluster cross_3shards_2replicas (part_key Date, id UInt32, shard_id UInt32)
-        ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{replica}')
-        partition by part_key order by id;
-    CREATE TABLE replica_1.replicated on cluster cross_3shards_2replicas as replica_1.replicated_local
-        ENGINE = Distributed(cross_3shards_2replicas, '', replicated_local, shard_id);
-
-    CREATE TABLE replica_2.replicated_local on cluster cross_3shards_2replicas (part_key Date, id UInt32, shard_id UInt32)
-        ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard_bk}/replicated', '{replica_bk}')
-        partition by part_key order by id;
-    CREATE TABLE replica_2.replicated on cluster cross_3shards_2replicas as replica_2.replicated_local
-        ENGINE = Distributed(cross_3shards_2replicas, '', replicated_local, shard_id);
-    ''')
+    CREATE DATABASE replica_1 ON CLUSTER cross_3shards_2replicas;
+    CREATE DATABASE replica_2 ON CLUSTER cross_3shards_2replicas;
+
+    CREATE TABLE replica_1.replicated_local
+    ON CLUSTER cross_3shards_2replicas (part_key Date, id UInt32, shard_id UInt32)
+    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{replica}')
+    partition by part_key order by id;
+
+    CREATE TABLE replica_1.replicated
+    ON CLUSTER cross_3shards_2replicas as replica_1.replicated_local
+    ENGINE = Distributed(cross_3shards_2replicas, '', replicated_local, shard_id);
+
+    CREATE TABLE replica_2.replicated_local
+    ON CLUSTER cross_3shards_2replicas (part_key Date, id UInt32, shard_id UInt32)
+    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard_bk}/replicated', '{replica_bk}')
+    partition by part_key order by id;
+
+    CREATE TABLE replica_2.replicated
+    ON CLUSTER cross_3shards_2replicas as replica_2.replicated_local
+    ENGINE = Distributed(cross_3shards_2replicas, '', replicated_local, shard_id);
+    ''')

         to_insert = '''\
 2017-06-16 10 0
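The fixture above builds the classic cross-replication layout: each node holds the primary replica of one shard in replica_1 and the backup replica of a neighbouring shard in replica_2, wired together through the {shard}/{replica} and {shard_bk}/{replica_bk} macros, with a Distributed table in each database fanning out over all three shards. A hypothetical sanity check for that layout (not part of the commit; it reuses assert_eq_with_retry from the test's imports):

    def check_cross_replication(node, expected_rows):
        # Every row inserted through either Distributed table must become
        # visible in both the primary and the backup database.
        for db in ("replica_1", "replica_2"):
            assert_eq_with_retry(
                node,
                "SELECT count(*) FROM {}.replicated".format(db),
                str(expected_rows))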
@@ -42,29 +48,31 @@ CREATE TABLE replica_2.replicated on cluster cross_3shards_2replicas as replica
 2017-06-16 30 2
 2017-06-17 31 2
 '''
-        node1.query("INSERT INTO replica_1.replicated FORMAT TSV", stdin=to_insert)
-        time.sleep(0.5)
+        node1.query("INSERT INTO replica_1.replicated FORMAT TSV", stdin=to_insert, settings={"insert_distributed_sync" : 1})

         yield cluster

     finally:
         # pass
         cluster.shutdown()


 def test_alter_ddl(started_cluster):
-    node1.query('''alter table replica_1.replicated_local on cluster cross_3shards_2replicas update shard_id=shard_id+3
-                where part_key='2017-06-16';
-                ''')
+    node1.query("ALTER TABLE replica_1.replicated_local \
+        ON CLUSTER cross_3shards_2replicas \
+        UPDATE shard_id=shard_id+3 \
+        WHERE part_key='2017-06-16'")

     node1.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5)
     assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated where shard_id >= 3 and part_key='2017-06-16'", '3')

-    node1.query("alter table replica_1.replicated_local on cluster cross_3shards_2replicas delete where shard_id >=3;")
+    node1.query("ALTER TABLE replica_1.replicated_local \
+        ON CLUSTER cross_3shards_2replicas DELETE WHERE shard_id >=3;")
     node1.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5)
     assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated where shard_id >= 3", '0')

-    node2.query("alter table replica_1.replicated_local on cluster cross_3shards_2replicas \
-                drop partition toDate('2017-06-17');")
+    node2.query("ALTER TABLE replica_1.replicated_local ON CLUSTER cross_3shards_2replicas DROP PARTITION '2017-06-17'")

     node2.query("SYSTEM SYNC REPLICA replica_2.replicated_local;", timeout=5)
     assert_eq_with_retry(node1, "SELECT count(*) FROM replica_2.replicated", '0')
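Both test changes follow the same recipe: replace fixed sleeps with explicit synchronization (insert_distributed_sync for the Distributed INSERT, SYSTEM SYNC REPLICA after each mutation) and poll with assert_eq_with_retry instead of asserting once. Condensed into a helper for illustration (the helper itself is hypothetical; the calls it wraps are taken verbatim from the test):

    def ddl_then_check(node, ddl, check_sql, expected):
        node.query(ddl)  # ON CLUSTER DDL fans out through DDLWorker
        node.query("SYSTEM SYNC REPLICA replica_2.replicated_local", timeout=5)
        assert_eq_with_retry(node, check_sql, expected)  # poll, don't sleep

    ddl_then_check(
        node1,
        "ALTER TABLE replica_1.replicated_local ON CLUSTER cross_3shards_2replicas "
        "DELETE WHERE shard_id >= 3",
        "SELECT count(*) FROM replica_2.replicated WHERE shard_id >= 3",
        '0')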