Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-26 17:41:59 +00:00

Merge pull request #45134 from ClickHouse/fix_flaky_azure_test

Fix flaky azure test

Commit df8ceeefdc

The commit bumps the contrib/azure submodule, links the Azure SDK into unit_tests_dbms so that a new XmlReader move-semantics test can build, de-templatizes the disk test helpers, and routes every query in the Azure integration test through the retrying azure_query() helper, which now also forwards per-query settings.
contrib/azure (vendored submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit ef75afc075fc71fbcd8fe28dcda3794ae265fd1c
+Subproject commit ea8c3044f43f5afa7016d2d580ed201f495d7e94
@@ -606,5 +606,10 @@ if (ENABLE_TESTS)
         target_link_libraries(unit_tests_dbms PRIVATE ch_contrib::yaml_cpp)
     endif()
 
+    if (TARGET ch_contrib::azure_sdk)
+        target_link_libraries(unit_tests_dbms PRIVATE ch_contrib::azure_sdk)
+    endif()
+
+
     add_check(unit_tests_dbms)
 endif ()
src/Disks/tests/gtest_azure_xml_reader.cpp (new file, 25 lines)
@@ -0,0 +1,25 @@
+#include <string>
+#include <vector>
+#include <Common/logger_useful.h>
+
+#include "config.h"
+
+#if USE_AZURE_BLOB_STORAGE
+
+#include <azure/storage/blobs.hpp>
+#include <azure/storage/common/internal/xml_wrapper.hpp>
+
+#include <gtest/gtest.h>
+
+
+TEST(AzureXMLWrapper, TestLeak)
+{
+    std::string str = "<hello>world</hello>";
+
+    Azure::Storage::_internal::XmlReader reader(str.c_str(), str.length());
+    Azure::Storage::_internal::XmlReader reader2(std::move(reader));
+    Azure::Storage::_internal::XmlReader reader3 = std::move(reader2);
+    reader3.Read();
+}
+
+#endif
@@ -7,49 +7,29 @@
 namespace fs = std::filesystem;
 
 
-template <typename T>
-DB::DiskPtr createDisk();
-
-
-template <>
-DB::DiskPtr createDisk<DB::DiskLocal>()
+DB::DiskPtr createDisk()
 {
     fs::create_directory("tmp/");
     return std::make_shared<DB::DiskLocal>("local_disk", "tmp/", 0);
 }
 
-
-template <typename T>
 void destroyDisk(DB::DiskPtr & disk)
-{
-    disk.reset();
-}
-
-
-template <>
-void destroyDisk<DB::DiskLocal>(DB::DiskPtr & disk)
 {
     disk.reset();
     fs::remove_all("tmp/");
 }
 
-
-template <typename T>
 class DiskTest : public testing::Test
 {
 public:
-    void SetUp() override { disk = createDisk<T>(); }
-    void TearDown() override { destroyDisk<T>(disk); }
+    void SetUp() override { disk = createDisk(); }
+    void TearDown() override { destroyDisk(disk); }
 
     DB::DiskPtr disk;
 };
 
 
-using DiskImplementations = testing::Types<DB::DiskLocal>;
-TYPED_TEST_SUITE(DiskTest, DiskImplementations);
-
-
-TYPED_TEST(DiskTest, createDirectories)
+TEST_F(DiskTest, createDirectories)
 {
     this->disk->createDirectories("test_dir1/");
     EXPECT_TRUE(this->disk->isDirectory("test_dir1/"));
@@ -59,7 +39,7 @@ TYPED_TEST(DiskTest, createDirectories)
 }
 
 
-TYPED_TEST(DiskTest, writeFile)
+TEST_F(DiskTest, writeFile)
 {
     {
         std::unique_ptr<DB::WriteBuffer> out = this->disk->writeFile("test_file");
@@ -77,7 +57,7 @@ TYPED_TEST(DiskTest, writeFile)
 }
 
 
-TYPED_TEST(DiskTest, readFile)
+TEST_F(DiskTest, readFile)
 {
     {
         std::unique_ptr<DB::WriteBuffer> out = this->disk->writeFile("test_file");
@@ -112,7 +92,7 @@ TYPED_TEST(DiskTest, readFile)
 }
 
 
-TYPED_TEST(DiskTest, iterateDirectory)
+TEST_F(DiskTest, iterateDirectory)
 {
     this->disk->createDirectories("test_dir/nested_dir/");
 
@@ -3,14 +3,6 @@
 #include <Disks/DiskLocal.h>
 #include <Disks/IDisk.h>
 
-template <typename T>
 DB::DiskPtr createDisk();
 
-template <>
-DB::DiskPtr createDisk<DB::DiskLocal>();
-
-template <typename T>
 void destroyDisk(DB::DiskPtr & disk);
-
-template <>
-void destroyDisk<DB::DiskLocal>(DB::DiskPtr & disk);
@@ -3,7 +3,7 @@
 #include <Disks/IDisk.h>
 
 
-TEST(DiskTest, parentPath)
+TEST(DiskPathTest, parentPath)
 {
     EXPECT_EQ("", DB::parentPath("test_dir/"));
     EXPECT_EQ("test_dir/", DB::parentPath("test_dir/nested_dir/"));
@@ -11,7 +11,7 @@ TEST(DiskTest, parentPath)
 }
 
 
-TEST(DiskTest, fileName)
+TEST(DiskPathTest, fileName)
 {
     EXPECT_EQ("test_file", DB::fileName("test_file"));
     EXPECT_EQ("nested_file", DB::fileName("test_dir/nested_file"));
@@ -39,21 +39,20 @@ DB::StoragePtr createStorage(DB::DiskPtr & disk)
     return table;
 }
 
-template <typename T>
 class StorageLogTest : public testing::Test
 {
 public:
 
     void SetUp() override
     {
-        disk = createDisk<T>();
+        disk = createDisk();
         table = createStorage(disk);
     }
 
     void TearDown() override
     {
         table->flushAndShutdown();
-        destroyDisk<T>(disk);
+        destroyDisk(disk);
     }
 
     const DB::DiskPtr & getDisk() { return disk; }
@@ -65,9 +64,6 @@ private:
 };
 
 
-using DiskImplementations = testing::Types<DB::DiskLocal>;
-TYPED_TEST_SUITE(StorageLogTest, DiskImplementations);
-
 // Returns data written to table in Values format.
 std::string writeData(int rows, DB::StoragePtr & table, const DB::ContextPtr context)
 {
@@ -153,7 +149,7 @@ std::string readData(DB::StoragePtr & table, const DB::ContextPtr context)
     return out_buf.str();
 }
 
-TYPED_TEST(StorageLogTest, testReadWrite)
+TEST_F(StorageLogTest, testReadWrite)
 {
     using namespace DB;
     const auto & context_holder = getContext();
@@ -42,10 +42,10 @@ def cluster():
 # For inserts there is no guarantee that retries will not result in duplicates.
 # But it is better to retry anyway because 'Connection was closed by the server' error
 # happens in fact only for inserts because reads already have build-in retries in code.
-def azure_query(node, query, try_num=3):
+def azure_query(node, query, try_num=3, settings={}):
     for i in range(try_num):
         try:
-            return node.query(query)
+            return node.query(query, settings=settings)
         except Exception as ex:
             retriable_errors = [
                 "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response"
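The hunk above shows only the top of the helper. For orientation, here is a minimal sketch of how the patched wrapper plausibly behaves end to end, assuming the usual retry shape: the backoff sleep, the final re-raise, and the FakeNode stub at the bottom are illustrative assumptions, not part of the diff.

import time

def azure_query(node, query, try_num=3, settings={}):
    # Retry transient Azure transport failures; forward per-query settings
    # so callers no longer have to bypass the wrapper via node.query().
    for i in range(try_num):
        try:
            return node.query(query, settings=settings)
        except Exception as ex:
            retriable_errors = [
                "DB::Exception: Azure::Core::Http::TransportException: "
                "Connection was closed by the server while trying to read a response"
            ]
            # Retry only the known transient error; anything else is a real
            # failure and should surface immediately.
            if not any(error in str(ex) for error in retriable_errors):
                raise
            time.sleep(i + 1)  # assumed backoff, not visible in the hunk
    raise Exception(f"Query failed after {try_num} retries: {query}")

# Hypothetical stand-in for a cluster node, checking that settings are
# actually forwarded to node.query() on the happy path.
class FakeNode:
    def query(self, query, settings={}):
        return (query, settings)

assert azure_query(FakeNode(), "SELECT 1", settings={"mutations_sync": 2}) == (
    "SELECT 1",
    {"mutations_sync": 2},
)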
@@ -80,7 +80,7 @@ def create_table(node, table_name, **additional_settings):
     ORDER BY (dt, id)
     SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}"""
 
-    node.query(f"DROP TABLE IF EXISTS {table_name}")
+    azure_query(node, f"DROP TABLE IF EXISTS {table_name}")
     azure_query(node, create_table_statement)
     assert (
         azure_query(node, f"SELECT COUNT(*) FROM {table_name} FORMAT Values") == "(0)"
@@ -230,9 +230,9 @@ def test_alter_table_columns(cluster):
         f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096, -1)}",
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} ADD COLUMN col1 UInt64 DEFAULT 1")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} ADD COLUMN col1 UInt64 DEFAULT 1")
     # To ensure parts have been merged
-    node.query(f"OPTIMIZE TABLE {TABLE_NAME}")
+    azure_query(node, f"OPTIMIZE TABLE {TABLE_NAME}")
 
     assert (
         azure_query(node, f"SELECT sum(col1) FROM {TABLE_NAME} FORMAT Values")
@@ -245,7 +245,8 @@ def test_alter_table_columns(cluster):
         == "(4096)"
     )
 
-    node.query(
+    azure_query(
+        node,
         f"ALTER TABLE {TABLE_NAME} MODIFY COLUMN col1 String",
         settings={"mutations_sync": 2},
     )
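This call site is why azure_query needed the new settings parameter: ALTER ... MODIFY COLUMN must run with mutations_sync, and before this change any settings-bearing statement had to use raw node.query(), outside the retry loop. A small illustrative check, reusing the azure_query sketch above with a hypothetical FlakyNode that fails once with the retriable transport error and then succeeds:

# Hypothetical node whose first call raises the transient Azure error.
class FlakyNode:
    def __init__(self):
        self.calls = 0

    def query(self, query, settings={}):
        self.calls += 1
        if self.calls == 1:
            raise Exception(
                "DB::Exception: Azure::Core::Http::TransportException: "
                "Connection was closed by the server while trying to read a response"
            )
        return "(0)"

flaky = FlakyNode()
# The transient failure is retried instead of failing the test, and the
# settings still reach the query on the successful second attempt.
assert azure_query(flaky, "ALTER TABLE t MODIFY COLUMN col1 String",
                   settings={"mutations_sync": 2}) == "(0)"
assert flaky.calls == 2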
@@ -271,26 +272,27 @@ def test_attach_detach_partition(cluster):
         == "(8192)"
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-03'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-03'")
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
         == "(4096)"
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} ATTACH PARTITION '2020-01-03'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} ATTACH PARTITION '2020-01-03'")
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
         == "(8192)"
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} DROP PARTITION '2020-01-03'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} DROP PARTITION '2020-01-03'")
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
         == "(4096)"
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-04'")
-    node.query(
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} DETACH PARTITION '2020-01-04'")
+    azure_query(
+        node,
         f"ALTER TABLE {TABLE_NAME} DROP DETACHED PARTITION '2020-01-04'",
         settings={"allow_drop_detached": 1},
     )
@@ -314,16 +316,18 @@ def test_move_partition_to_another_disk(cluster):
         == "(8192)"
     )
 
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{LOCAL_DISK}'"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{LOCAL_DISK}'",
     )
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
         == "(8192)"
     )
 
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{AZURE_BLOB_STORAGE_DISK}'"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-04' TO DISK '{AZURE_BLOB_STORAGE_DISK}'",
     )
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
@@ -344,14 +348,14 @@ def test_table_manipulations(cluster):
         f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}"
     )
 
-    node.query(f"RENAME TABLE {TABLE_NAME} TO {renamed_table}")
+    azure_query(node, f"RENAME TABLE {TABLE_NAME} TO {renamed_table}")
     assert (
         azure_query(node, f"SELECT count(*) FROM {renamed_table} FORMAT Values")
         == "(8192)"
     )
 
-    node.query(f"RENAME TABLE {renamed_table} TO {TABLE_NAME}")
-    assert node.query(f"CHECK TABLE {TABLE_NAME} FORMAT Values") == "(1)"
+    azure_query(node, f"RENAME TABLE {renamed_table} TO {TABLE_NAME}")
+    assert azure_query(node, f"CHECK TABLE {TABLE_NAME} FORMAT Values") == "(1)"
 
     node.query(f"DETACH TABLE {TABLE_NAME}")
     node.query(f"ATTACH TABLE {TABLE_NAME}")
@@ -360,7 +364,7 @@ def test_table_manipulations(cluster):
         == "(8192)"
     )
 
-    node.query(f"TRUNCATE TABLE {TABLE_NAME}")
+    azure_query(node, f"TRUNCATE TABLE {TABLE_NAME}")
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values") == "(0)"
     )
@@ -395,11 +399,13 @@ def test_move_replace_partition_to_another_table(cluster):
 
     create_table(node, table_clone_name)
 
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-03' TO TABLE {table_clone_name}"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-03' TO TABLE {table_clone_name}",
     )
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-05' TO TABLE {table_clone_name}"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} MOVE PARTITION '2020-01-05' TO TABLE {table_clone_name}",
     )
     assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)"
     assert (
@@ -428,11 +434,13 @@ def test_move_replace_partition_to_another_table(cluster):
         == "(1024)"
     )
 
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-03' FROM {table_clone_name}"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-03' FROM {table_clone_name}",
     )
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-05' FROM {table_clone_name}"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} REPLACE PARTITION '2020-01-05' FROM {table_clone_name}",
     )
     assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)"
     assert (
@@ -448,16 +456,16 @@ def test_move_replace_partition_to_another_table(cluster):
         == "(512)"
     )
 
-    node.query(f"DROP TABLE {table_clone_name} NO DELAY")
+    azure_query(node, f"DROP TABLE {table_clone_name} NO DELAY")
     assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)"
     assert (
         azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
         == "(1024)"
     )
 
-    node.query(f"ALTER TABLE {TABLE_NAME} FREEZE")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} FREEZE")
 
-    node.query(f"DROP TABLE {TABLE_NAME} NO DELAY")
+    azure_query(node, f"DROP TABLE {TABLE_NAME} NO DELAY")
 
 
 def test_freeze_unfreeze(cluster):
@@ -470,20 +478,21 @@ def test_freeze_unfreeze(cluster):
     azure_query(
         node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}"
     )
-    node.query(f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup1}'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup1}'")
     azure_query(
         node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-04', 4096)}"
     )
-    node.query(f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup2}'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} FREEZE WITH NAME '{backup2}'")
 
     azure_query(node, f"TRUNCATE TABLE {TABLE_NAME}")
 
     # Unfreeze single partition from backup1.
-    node.query(
-        f"ALTER TABLE {TABLE_NAME} UNFREEZE PARTITION '2020-01-03' WITH NAME '{backup1}'"
+    azure_query(
+        node,
+        f"ALTER TABLE {TABLE_NAME} UNFREEZE PARTITION '2020-01-03' WITH NAME '{backup1}'",
     )
     # Unfreeze all partitions from backup2.
-    node.query(f"ALTER TABLE {TABLE_NAME} UNFREEZE WITH NAME '{backup2}'")
+    azure_query(node, f"ALTER TABLE {TABLE_NAME} UNFREEZE WITH NAME '{backup2}'")
 
 
 def test_apply_new_settings(cluster):
@@ -524,8 +533,8 @@ def test_big_insert(cluster):
         node,
         f"INSERT INTO {TABLE_NAME} {check_query}",
     )
-    assert azure_query(node, f"SELECT * FROM {TABLE_NAME} ORDER BY id") == node.query(
-        check_query
+    assert azure_query(node, f"SELECT * FROM {TABLE_NAME} ORDER BY id") == azure_query(
+        node, check_query
     )
 
     blob_container_client = cluster.blob_service_client.get_container_client(