Fix tests

divanik 2024-06-17 17:40:52 +00:00
parent 253d745ac5
commit 6f841d89e7
9 changed files with 49 additions and 48 deletions

View File

@@ -56,12 +56,9 @@ private:
         std::cout << relative_path << ":\n";
-        if (!file_names.empty())
-        {
-            for (const auto & file_name : file_names)
-                if (show_hidden || (!file_name.starts_with('.')))
-                    selected_and_sorted_file_names.push_back(file_name);
-        }
+        for (const auto & file_name : file_names)
+            if (show_hidden || (!file_name.starts_with('.')))
+                selected_and_sorted_file_names.push_back(file_name);
         std::sort(selected_and_sorted_file_names.begin(), selected_and_sorted_file_names.end());
         for (const auto & file_name : selected_and_sorted_file_names)
@@ -84,7 +81,9 @@ private:
                 }
             }();
             if (disk.isDirectory(path))
+            {
                 listRecursive(disk, path, show_hidden);
+            }
         }
     }
 };

View File

@@ -49,17 +49,15 @@ CommandPtr DisksApp::getCommandByName(const String & command) const
 std::vector<String> DisksApp::getEmptyCompletion(String command_name) const
 {
     auto command_ptr = command_descriptions.at(command_name);
-    auto answer = [&]() -> std::vector<String>
+    std::vector<String> answer{};
+    if (multidisk_commands.contains(command_ptr->command_name))
     {
-        if (multidisk_commands.contains(command_ptr->command_name))
-        {
-            return client->getAllFilesByPatternFromAllDisks("");
-        }
-        else
-        {
-            return client->getCurrentDiskWithPath().getAllFilesByPattern("");
-        }
-    }();
+        answer = client->getAllFilesByPatternFromAllDisks("");
+    }
+    else
+    {
+        answer = client->getCurrentDiskWithPath().getAllFilesByPattern("");
+    }
     for (const auto & disk_name : client->getAllDiskNames())
     {
         answer.push_back(disk_name);
@@ -211,7 +209,7 @@ bool DisksApp::processQueryText(const String & text)
         int code = getCurrentExceptionCode();
         if (code == ErrorCodes::LOGICAL_ERROR)
         {
-            throw err;
+            throw std::move(err);
         }
         else if (code == ErrorCodes::BAD_ARGUMENTS)
         {
@@ -467,7 +465,7 @@ int DisksApp::main(const std::vector<String> & /*args*/)
     registerDisks(/* global_skip_access_check= */ true);
     registerFormats();
-    auto shared_context = Context::createShared();
+    shared_context = Context::createShared();
     global_context = Context::createGlobal(shared_context.get());
     global_context->makeGlobalContext();

View File

@@ -68,6 +68,8 @@ private:
     static String word_break_characters;
     // General command line arguments parsing fields
+    SharedContextHolder shared_context;
+    ContextMutablePtr global_context;
     ProgramOptionsDescription options_description;
     CommandLineOptions options;

View File

@@ -14,7 +14,6 @@ namespace ErrorCodes
namespace DB
{
DiskWithPath::DiskWithPath(DiskPtr disk_, std::optional<String> path_) : disk(disk_)
{
if (path_.has_value())

View File

@@ -32,7 +32,10 @@ public:
     String getCurrentPath() const { return path; }
-    bool isDirectory(const String & any_path) const { return disk->isDirectory(getRelativeFromRoot(any_path)); }
+    bool isDirectory(const String & any_path) const
+    {
+        return disk->isDirectory(getRelativeFromRoot(any_path)) || disk->isDirectory(getAbsolutePath(any_path));
+    }
     std::vector<String> listAllFilesByPath(const String & any_path) const;

View File

@@ -53,7 +53,7 @@ protected:
         {
             return options[name].as<T>();
         }
-        catch (boost::bad_any_cast)
+        catch (boost::bad_any_cast &)
         {
             throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Argument '{}' has wrong type and can't be parsed", name);
         }

View File

@@ -14,14 +14,14 @@ function run_test_for_disk()
     echo "$disk"
-    clickhouse-disks -C "$config" --disk "$disk" write --input "$config" $CLICKHOUSE_DATABASE/test
-    clickhouse-disks -C "$config" --log-level test --disk "$disk" copy $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy |& {
+    clickhouse-disks -C "$config" --disk "$disk" --query "write --path-from "$config" $CLICKHOUSE_DATABASE/test"
+    clickhouse-disks -C "$config" --log-level test --disk "$disk" --query "copy $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy" |& {
         grep -o -e "Single part upload has completed." -e "Single operation copy has completed."
     }
-    clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test
+    clickhouse-disks -C "$config" --disk "$disk" --query "remove $CLICKHOUSE_DATABASE/test"
     # NOTE: this is due to "copy" does works like "cp -R from to/" instead of "cp from to"
-    clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test.copy/test
-    clickhouse-disks -C "$config" --disk "$disk" remove $CLICKHOUSE_DATABASE/test.copy
+    clickhouse-disks -C "$config" --disk "$disk" --query "remove $CLICKHOUSE_DATABASE/test.copy/test"
+    clickhouse-disks -C "$config" --disk "$disk" --query "remove $CLICKHOUSE_DATABASE/test.copy"
 }
function run_test_copy_from_s3_to_s3(){
@@ -29,13 +29,13 @@ function run_test_copy_from_s3_to_s3(){
     local disk_dest=$1 && shift
     echo "copy from $disk_src to $disk_dest"
-    clickhouse-disks -C "$config" --disk "$disk_src" write --input "$config" $CLICKHOUSE_DATABASE/test
+    clickhouse-disks -C "$config" --disk "$disk_src" --query "write --path-from "$config" $CLICKHOUSE_DATABASE/test"
-    clickhouse-disks -C "$config" --log-level test copy --disk-from "$disk_src" --disk-to "$disk_dest" $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy |& {
+    clickhouse-disks -C "$config" --log-level test --query "copy --disk-from "$disk_src" --disk-to "$disk_dest" $CLICKHOUSE_DATABASE/test $CLICKHOUSE_DATABASE/test.copy" |& {
         grep -o -e "Single part upload has completed." -e "Single operation copy has completed."
     }
-    clickhouse-disks -C "$config" --disk "$disk_dest" remove $CLICKHOUSE_DATABASE/test.copy/test
-    clickhouse-disks -C "$config" --disk "$disk_dest" remove $CLICKHOUSE_DATABASE/test.copy
+    clickhouse-disks -C "$config" --disk "$disk_dest" --query "remove $CLICKHOUSE_DATABASE/test.copy/test"
+    clickhouse-disks -C "$config" --disk "$disk_dest" --query "remove $CLICKHOUSE_DATABASE/test.copy"
 }
 run_test_for_disk s3_plain_native_copy

View File

@@ -3,28 +3,28 @@ data after ATTACH 1
 Files before DETACH TABLE
 all_1_1_0
-backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
-primary.cidx
-serialization.json
-metadata_version.txt
-default_compression_codec.txt
+/backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
+checksums.txt
+columns.txt
+count.txt
 data.bin
 data.cmrk3
-count.txt
-columns.txt
-checksums.txt
+default_compression_codec.txt
+metadata_version.txt
+primary.cidx
+serialization.json
 Files after DETACH TABLE
 all_1_1_0
-backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
-primary.cidx
-serialization.json
-metadata_version.txt
-default_compression_codec.txt
+/backups/ordinary_default/data/ordinary_default/data/all_1_1_0:
+checksums.txt
+columns.txt
+count.txt
 data.bin
 data.cmrk3
-count.txt
-columns.txt
-checksums.txt
+default_compression_codec.txt
+metadata_version.txt
+primary.cidx
+serialization.json

View File

@@ -49,11 +49,11 @@ path=$($CLICKHOUSE_CLIENT -q "SELECT replace(data_paths[1], 's3_plain', '') FROM
 path=${path%/}
 echo "Files before DETACH TABLE"
-clickhouse-disks -C "$config" --disk s3_plain_disk list --recursive "${path:?}" | tail -n+2
+clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive "${path:?}"" | tail -n+2
 $CLICKHOUSE_CLIENT -q "detach table data"
 echo "Files after DETACH TABLE"
-clickhouse-disks -C "$config" --disk s3_plain_disk list --recursive "$path" | tail -n+2
+clickhouse-disks -C "$config" --disk s3_plain_disk --query "list --recursive "$path"" | tail -n+2
 # metadata file is left
 $CLICKHOUSE_CLIENT --force_remove_data_recursively_on_drop=1 -q "drop database if exists $CLICKHOUSE_DATABASE"