Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into progress-bar
commit de277f8ac4
@@ -26,8 +26,6 @@
 #include <Poco/Observer.h>
 #include <Poco/AutoPtr.h>
 #include <Poco/PatternFormatter.h>
-#include <Poco/File.h>
-#include <Poco/Path.h>
 #include <Poco/Message.h>
 #include <Poco/Util/Application.h>
 #include <Poco/Exception.h>
@@ -59,6 +57,7 @@
 #include <Common/getExecutablePath.h>
 #include <Common/getHashOfLoadedBinary.h>
 #include <Common/Elf.h>
+#include <filesystem>

 #if !defined(ARCADIA_BUILD)
 #   include <Common/config_version.h>
@@ -70,6 +69,7 @@
 #endif
 #include <ucontext.h>

+namespace fs = std::filesystem;

 DB::PipeFDs signal_pipe;

@@ -437,11 +437,11 @@ static void sanitizerDeathCallback()

 static std::string createDirectory(const std::string & file)
 {
-    auto path = Poco::Path(file).makeParent();
-    if (path.toString().empty())
+    fs::path path = fs::path(file).parent_path();
+    if (path.empty())
         return "";
-    Poco::File(path).createDirectories();
-    return path.toString();
+    fs::create_directories(path);
+    return path;
 };

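Aside from the mechanical rename, the new helper above leans on two std::filesystem properties: parent_path() of a bare file name is empty (preserving the old empty-string early return of makeParent()), and fs::path converts implicitly to std::string on POSIX, so "return path;" still yields a string. A minimal standalone sketch of the same helper, with made-up example paths:

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

/// Create the parent directory of `file` (if any) and return it, or "" when `file` has no parent.
static std::string createDirectory(const std::string & file)
{
    fs::path path = fs::path(file).parent_path();
    if (path.empty())
        return "";
    fs::create_directories(path);   /// no-op if the directory already exists
    return path;                    /// implicit fs::path -> std::string conversion
}

int main()
{
    std::cout << createDirectory("/tmp/clickhouse-demo/logs/server.log") << '\n';  /// "/tmp/clickhouse-demo/logs"
    std::cout << createDirectory("server.log") << '\n';                            /// "" (no parent component)
}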
@@ -449,7 +449,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
 {
     try
     {
-        Poco::File(path).createDirectories();
+        fs::create_directories(path);
         return true;
     }
     catch (...)
@@ -470,7 +470,7 @@ void BaseDaemon::reloadConfiguration()
      */
    config_path = config().getString("config-file", getDefaultConfigFileName());
    DB::ConfigProcessor config_processor(config_path, false, true);
-   config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString());
+   config_processor.setConfigPath(fs::path(config_path).parent_path());
    loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);

    if (last_configuration != nullptr)
@@ -524,18 +524,20 @@ std::string BaseDaemon::getDefaultConfigFileName() const
 void BaseDaemon::closeFDs()
 {
 #if defined(OS_FREEBSD) || defined(OS_DARWIN)
-    Poco::File proc_path{"/dev/fd"};
+    fs::path proc_path{"/dev/fd"};
 #else
-    Poco::File proc_path{"/proc/self/fd"};
+    fs::path proc_path{"/proc/self/fd"};
 #endif
-    if (proc_path.isDirectory()) /// Hooray, proc exists
+    if (fs::is_directory(proc_path)) /// Hooray, proc exists
     {
-        std::vector<std::string> fds;
-        /// in /proc/self/fd directory filenames are numeric file descriptors
-        proc_path.list(fds);
-        for (const auto & fd_str : fds)
+        /// in /proc/self/fd directory filenames are numeric file descriptors.
+        /// Iterate directory separately from closing fds to avoid closing iterated directory fd.
+        std::vector<int> fds;
+        for (const auto & path : fs::directory_iterator(proc_path))
+            fds.push_back(DB::parse<int>(path.path().filename()));
+
+        for (const auto & fd : fds)
         {
-            int fd = DB::parse<int>(fd_str);
             if (fd > 2 && fd != signal_pipe.fds_rw[0] && fd != signal_pipe.fds_rw[1])
                 ::close(fd);
         }
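The new comments spell out why the descriptors are collected first and closed afterwards: closing file descriptors while iterating /proc/self/fd would close the descriptor backing the directory iterator itself. A simplified, self-contained sketch of that two-pass pattern (Linux-only; the real code also spares its signal pipe and parses with DB::parse rather than std::stoi):

#include <filesystem>
#include <string>
#include <vector>
#include <unistd.h>

namespace fs = std::filesystem;

void closeInheritedFDs()
{
    const fs::path proc_path = "/proc/self/fd";

    /// Pass 1: collect the numeric file descriptors listed in /proc/self/fd.
    std::vector<int> fds;
    for (const auto & entry : fs::directory_iterator(proc_path))
        fds.push_back(std::stoi(entry.path().filename().string()));

    /// Pass 2: close them, keeping stdin/stdout/stderr.
    for (int fd : fds)
        if (fd > 2)
            ::close(fd);
}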
@@ -597,7 +599,7 @@ void BaseDaemon::initialize(Application & self)
     {
         /** When creating pid file and looking for config, will search for paths relative to the working path of the program when started.
           */
-        std::string path = Poco::Path(config().getString("application.path")).setFileName("").toString();
+        std::string path = fs::path(config().getString("application.path")).replace_filename("");
         if (0 != chdir(path.c_str()))
             throw Poco::Exception("Cannot change directory to " + path);
     }
@@ -645,7 +647,7 @@ void BaseDaemon::initialize(Application & self)

     std::string log_path = config().getString("logger.log", "");
     if (!log_path.empty())
-        log_path = Poco::Path(log_path).setFileName("").toString();
+        log_path = fs::path(log_path).replace_filename("");

     /** Redirect stdout, stderr to separate files in the log directory (or in the specified file).
       * Some libraries write to stderr in case of errors in debug mode,
@@ -708,8 +710,7 @@ void BaseDaemon::initialize(Application & self)

         tryCreateDirectories(&logger(), core_path);

-        Poco::File cores = core_path;
-        if (!(cores.exists() && cores.isDirectory()))
+        if (!(fs::exists(core_path) && fs::is_directory(core_path)))
         {
             core_path = !log_path.empty() ? log_path : "/opt/";
             tryCreateDirectories(&logger(), core_path);
@@ -1,6 +1,5 @@
 #include <daemon/SentryWriter.h>

-#include <Poco/File.h>
 #include <Poco/Util/Application.h>
 #include <Poco/Util/LayeredConfiguration.h>

@@ -25,6 +24,7 @@
 #   include <stdio.h>
 #   include <filesystem>

+namespace fs = std::filesystem;

 namespace
 {
@@ -53,8 +53,7 @@ void setExtras()
     sentry_set_extra("physical_cpu_cores", sentry_value_new_int32(getNumberOfPhysicalCPUCores()));

     if (!server_data_path.empty())
-        sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(
-            Poco::File(server_data_path).freeSpace()).c_str()));
+        sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str()));
 }

 void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
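For reference, std::filesystem::space(), which replaces Poco::File::freeSpace() above, returns a space_info struct with three byte counters; a small sketch using "/" as a placeholder path:

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    fs::space_info s = fs::space("/");
    std::cout << "capacity:  " << s.capacity  << " bytes\n";
    std::cout << "free:      " << s.free      << " bytes\n";   /// the field the hunk above reports
    std::cout << "available: " << s.available << " bytes\n";   /// free space for unprivileged users
}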
@@ -110,12 +109,12 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
     if (enabled)
     {
         server_data_path = config.getString("path", "");
-        const std::filesystem::path & default_tmp_path = std::filesystem::path(config.getString("tmp_path", Poco::Path::temp())) / "sentry";
+        const std::filesystem::path & default_tmp_path = fs::path(config.getString("tmp_path", fs::temp_directory_path())) / "sentry";
         const std::string & endpoint
             = config.getString("send_crash_reports.endpoint");
         const std::string & temp_folder_path
             = config.getString("send_crash_reports.tmp_path", default_tmp_path);
-        Poco::File(temp_folder_path).createDirectories();
+        fs::create_directories(temp_folder_path);

         sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
         sentry_options_set_release(options, VERSION_STRING_SHORT);
@@ -6,10 +6,11 @@
 #include "OwnFormattingChannel.h"
 #include "OwnPatternFormatter.h"
 #include <Poco/ConsoleChannel.h>
-#include <Poco/File.h>
 #include <Poco/Logger.h>
 #include <Poco/Net/RemoteSyslogChannel.h>
-#include <Poco/Path.h>
+#include <filesystem>

+namespace fs = std::filesystem;
+
 namespace DB
 {
@@ -20,11 +21,11 @@ namespace DB
 // TODO: move to libcommon
 static std::string createDirectory(const std::string & file)
 {
-    auto path = Poco::Path(file).makeParent();
-    if (path.toString().empty())
+    auto path = fs::path(file).parent_path();
+    if (path.empty())
         return "";
-    Poco::File(path).createDirectories();
-    return path.toString();
+    fs::create_directories(path);
+    return path;
 };

 void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
@@ -70,7 +71,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log

         // Set up two channel chains.
         log_file = new Poco::FileChannel;
-        log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(log_path).absolute().toString());
+        log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(log_path));
         log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
         log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
         log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
@@ -102,7 +103,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
         std::cerr << "Logging errors to " << errorlog_path << std::endl;

         error_log_file = new Poco::FileChannel;
-        error_log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(errorlog_path).absolute().toString());
+        error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path));
         error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
         error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
         error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
@@ -8,7 +8,6 @@
 #include <iomanip>
 #include <random>
 #include <pcg_random.hpp>
-#include <Poco/File.h>
 #include <Poco/Util/Application.h>
 #include <Common/Stopwatch.h>
 #include <Common/ThreadPool.h>
@@ -36,7 +35,9 @@
 #include <Common/Config/configReadClient.h>
 #include <Common/TerminalSize.h>
 #include <Common/StudentTTest.h>
+#include <filesystem>

+namespace fs = std::filesystem;

 /** A tool for evaluating ClickHouse performance.
   * The tool emulates a case with fixed amount of simultaneously executing queries.
@@ -119,8 +120,8 @@ public:

     int main(const std::vector<std::string> &) override
     {
-        if (!json_path.empty() && Poco::File(json_path).exists()) /// Clear file with previous results
-            Poco::File(json_path).remove();
+        if (!json_path.empty() && fs::exists(json_path)) /// Clear file with previous results
+            fs::remove(json_path);

         readQueries();
         runBenchmark();
@@ -25,7 +25,6 @@
 #include <boost/program_options.hpp>
 #include <boost/algorithm/string/replace.hpp>
 #include <Poco/String.h>
-#include <Poco/File.h>
 #include <Poco/Util/Application.h>
 #include <common/find_symbols.h>
 #include <common/LineReader.h>
@@ -86,6 +85,8 @@
 #include <Common/TerminalSize.h>
 #include <Common/UTF8Helpers.h>
 #include <Common/ProgressIndication.h>
+#include <filesystem>
+#include <Common/filesystemHelpers.h>

 #if !defined(ARCADIA_BUILD)
 #   include <Common/config_version.h>
@@ -95,6 +96,7 @@
 #pragma GCC optimize("-fno-var-tracking-assignments")
 #endif

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -275,7 +277,7 @@ private:

         /// Set path for format schema files
         if (config().has("format_schema_path"))
-            context->setFormatSchemaPath(Poco::Path(config().getString("format_schema_path")).toString());
+            context->setFormatSchemaPath(fs::weakly_canonical(config().getString("format_schema_path")));

         /// Initialize query_id_formats if any
         if (config().has("query_id_formats"))
@@ -632,8 +634,8 @@ private:
             history_file = home_path + "/.clickhouse-client-history";
         }

-        if (!history_file.empty() && !Poco::File(history_file).exists())
-            Poco::File(history_file).createFile();
+        if (!history_file.empty() && !fs::exists(history_file))
+            FS::createFile(history_file);

         LineReader::Patterns query_extenders = {"\\"};
         LineReader::Patterns query_delimiters = {";", "\\G"};
@@ -5,7 +5,9 @@
 #include <Formats/registerFormats.h>
 #include <ext/scope_guard_safe.h>
 #include <unistd.h>
+#include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -26,7 +28,7 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)
     copy_fault_probability = std::max(std::min(config().getDouble("copy-fault-probability"), 1.0), 0.0);
     if (config().has("move-fault-probability"))
         move_fault_probability = std::max(std::min(config().getDouble("move-fault-probability"), 1.0), 0.0);
-    base_dir = (config().has("base-dir")) ? config().getString("base-dir") : Poco::Path::current();
+    base_dir = (config().has("base-dir")) ? config().getString("base-dir") : fs::current_path().string();


     if (config().has("experimental-use-sample-offset"))
@@ -38,18 +40,18 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)

     process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid);
     host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id;
-    process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString();
-    Poco::File(process_path).createDirectories();
+    process_path = fs::weakly_canonical(fs::path(base_dir) / ("clickhouse-copier_" + process_id));
+    fs::create_directories(process_path);

     /// Override variables for BaseDaemon
     if (config().has("log-level"))
         config().setString("logger.level", config().getString("log-level"));

     if (config().has("base-dir") || !config().has("logger.log"))
-        config().setString("logger.log", process_path + "/log.log");
+        config().setString("logger.log", fs::path(process_path) / "log.log");

     if (config().has("base-dir") || !config().has("logger.errorlog"))
-        config().setString("logger.errorlog", process_path + "/log.err.log");
+        config().setString("logger.errorlog", fs::path(process_path) / "log.err.log");

     Base::initialize(self);
 }
@@ -8,7 +8,6 @@
 #include <Poco/FormattingChannel.h>
 #include <Poco/PatternFormatter.h>
 #include <Poco/UUIDGenerator.h>
-#include <Poco/File.h>
 #include <Poco/Process.h>
 #include <Poco/FileChannel.h>
 #include <Poco/SplitterChannel.h>
@@ -288,7 +288,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
     bool is_symlink = fs::is_symlink(symlink_path);
     fs::path points_to;
     if (is_symlink)
-        points_to = fs::absolute(fs::read_symlink(symlink_path));
+        points_to = fs::weakly_canonical(fs::read_symlink(symlink_path));

     if (is_symlink && points_to == main_bin_path)
     {
@@ -42,9 +42,9 @@
 #include <common/argsToConfig.h>
 #include <Common/TerminalSize.h>
 #include <Common/randomSeed.h>

 #include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -72,11 +72,11 @@ void LocalServer::initialize(Poco::Util::Application & self)
     Poco::Util::Application::initialize(self);

     /// Load config files if exists
-    if (config().has("config-file") || Poco::File("config.xml").exists())
+    if (config().has("config-file") || fs::exists("config.xml"))
     {
         const auto config_path = config().getString("config-file", "config.xml");
         ConfigProcessor config_processor(config_path, false, true);
-        config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString());
+        config_processor.setConfigPath(fs::path(config_path).parent_path());
         auto loaded_config = config_processor.loadConfig();
         config_processor.savePreprocessedConfig(loaded_config, loaded_config.configuration->getString("path", "."));
         config().add(loaded_config.configuration.duplicate(), PRIO_DEFAULT, false);
@@ -287,8 +287,8 @@ try
     status.emplace(path + "status", StatusFile::write_full_info);

     LOG_DEBUG(log, "Loading metadata from {}", path);
-    Poco::File(path + "data/").createDirectories();
-    Poco::File(path + "metadata/").createDirectories();
+    fs::create_directories(fs::path(path) / "data/");
+    fs::create_directories(fs::path(path) / "metadata/");
     loadMetadataSystem(global_context);
     attachSystemTables(global_context);
     loadMetadata(global_context);
@@ -476,7 +476,7 @@ void LocalServer::setupUsers()
 {
     ConfigurationPtr users_config;

-    if (config().has("users_config") || config().has("config-file") || Poco::File("config.xml").exists())
+    if (config().has("users_config") || config().has("config-file") || fs::exists("config.xml"))
     {
         const auto users_config_path = config().getString("users_config", config().getString("config-file", "config.xml"));
         ConfigProcessor config_processor(users_config_path);
@@ -74,6 +74,7 @@
 #include <Server/PostgreSQLHandlerFactory.h>
 #include <Server/ProtocolServerAdapter.h>
 #include <Server/HTTP/HTTPServer.h>
+#include <filesystem>


 #if !defined(ARCADIA_BUILD)
@@ -117,6 +118,8 @@ namespace CurrentMetrics
     extern const Metric MaxDDLEntryID;
 }

+namespace fs = std::filesystem;
+
 #if USE_JEMALLOC
 static bool jemallocOptionEnabled(const char *name)
 {
@@ -183,19 +186,19 @@ void setupTmpPath(Poco::Logger * log, const std::string & path)
 {
     LOG_DEBUG(log, "Setting up {} to store temporary data in it", path);

-    Poco::File(path).createDirectories();
+    fs::create_directories(path);

     /// Clearing old temporary files.
-    Poco::DirectoryIterator dir_end;
-    for (Poco::DirectoryIterator it(path); it != dir_end; ++it)
+    fs::directory_iterator dir_end;
+    for (fs::directory_iterator it(path); it != dir_end; ++it)
     {
-        if (it->isFile() && startsWith(it.name(), "tmp"))
+        if (it->is_regular_file() && startsWith(it->path().filename(), "tmp"))
         {
-            LOG_DEBUG(log, "Removing old temporary file {}", it->path());
-            it->remove();
+            LOG_DEBUG(log, "Removing old temporary file {}", it->path().string());
+            fs::remove(it->path());
         }
         else
-            LOG_DEBUG(log, "Skipped file in temporary path {}", it->path());
+            LOG_DEBUG(log, "Skipped file in temporary path {}", it->path().string());
     }
 }

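A condensed sketch of the std::filesystem idioms used in setupTmpPath above (range-based directory iteration plus per-entry type checks); logging is reduced to std::cout and the prefix test uses plain std::string, whereas the real code uses LOG_DEBUG and startsWith:

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

void clearTmpFiles(const std::string & dir)
{
    fs::create_directories(dir);
    for (const auto & entry : fs::directory_iterator(dir))
    {
        const std::string name = entry.path().filename().string();
        if (entry.is_regular_file() && name.rfind("tmp", 0) == 0)  /// name starts with "tmp"
        {
            std::cout << "Removing old temporary file " << entry.path().string() << '\n';
            fs::remove(entry.path());
        }
        else
            std::cout << "Skipped file in temporary path " << entry.path().string() << '\n';
    }
}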
@@ -678,37 +681,38 @@ int Server::main(const std::vector<std::string> & /*args*/)
       * Examples: do repair of local data; clone all replicated tables from replica.
       */
     {
-        Poco::File(path + "flags/").createDirectories();
-        global_context->setFlagsPath(path + "flags/");
+        auto flags_path = fs::path(path) / "flags/";
+        fs::create_directories(flags_path);
+        global_context->setFlagsPath(flags_path);
     }

     /** Directory with user provided files that are usable by 'file' table function.
       */
     {

-        std::string user_files_path = config().getString("user_files_path", path + "user_files/");
+        std::string user_files_path = config().getString("user_files_path", fs::path(path) / "user_files/");
         global_context->setUserFilesPath(user_files_path);
-        Poco::File(user_files_path).createDirectories();
+        fs::create_directories(user_files_path);
     }

     {
-        std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", path + "dictionaries_lib/");
+        std::string dictionaries_lib_path = config().getString("dictionaries_lib_path", fs::path(path) / "dictionaries_lib/");
         global_context->setDictionariesLibPath(dictionaries_lib_path);
-        Poco::File(dictionaries_lib_path).createDirectories();
+        fs::create_directories(dictionaries_lib_path);
     }

     /// top_level_domains_lists
     {
-        const std::string & top_level_domains_path = config().getString("top_level_domains_path", path + "top_level_domains/") + "/";
-        TLDListsHolder::getInstance().parseConfig(top_level_domains_path, config());
+        const std::string & top_level_domains_path = config().getString("top_level_domains_path", fs::path(path) / "top_level_domains/");
+        TLDListsHolder::getInstance().parseConfig(fs::path(top_level_domains_path) / "", config());
     }

     {
-        Poco::File(path + "data/").createDirectories();
-        Poco::File(path + "metadata/").createDirectories();
+        fs::create_directories(fs::path(path) / "data/");
+        fs::create_directories(fs::path(path) / "metadata/");

         /// Directory with metadata of tables, which was marked as dropped by Atomic database
-        Poco::File(path + "metadata_dropped/").createDirectories();
+        fs::create_directories(fs::path(path) / "metadata_dropped/");
     }

     if (config().has("interserver_http_port") && config().has("interserver_https_port"))
@@ -891,9 +895,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
 #endif

     /// Set path for format schema files
-    auto format_schema_path = Poco::File(config().getString("format_schema_path", path + "format_schemas/"));
-    global_context->setFormatSchemaPath(format_schema_path.path());
-    format_schema_path.createDirectories();
+    fs::path format_schema_path(config().getString("format_schema_path", fs::path(path) / "format_schemas/"));
+    global_context->setFormatSchemaPath(format_schema_path);
+    fs::create_directories(format_schema_path);

     /// Check sanity of MergeTreeSettings on server startup
     global_context->getMergeTreeSettings().sanityCheck(settings);
@@ -7,7 +7,9 @@
 #include <boost/range/algorithm/find.hpp>
 #include <boost/range/algorithm_ext/erase.hpp>
 #include <boost/algorithm/string/predicate.hpp>
+#include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -198,9 +200,9 @@ inline String AllowedClientHosts::IPSubnet::toString() const
     if (isMaskAllBitsOne())
         return prefix.toString();
     else if (IPAddress{prefix_length, mask.family()} == mask)
-        return prefix.toString() + "/" + std::to_string(prefix_length);
+        return fs::path(prefix.toString()) / std::to_string(prefix_length);
     else
-        return prefix.toString() + "/" + mask.toString();
+        return fs::path(prefix.toString()) / mask.toString();
 }

 inline bool AllowedClientHosts::IPSubnet::isMaskAllBitsOne() const
@@ -3,9 +3,10 @@
 #include <IO/ReadWriteBufferFromHTTP.h>
 #include <IO/ReadHelpers.h>
 #include <Poco/Net/HTTPRequest.h>
-#include <Poco/Path.h>
 #include <Poco/URI.h>
+#include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -87,10 +88,10 @@ std::unique_ptr<ShellCommand> IBridgeHelper::startBridgeCommand() const

     const auto & config = getConfig();
     /// Path to executable folder
-    Poco::Path path{config.getString("application.dir", "/usr/bin")};
+    fs::path path(config.getString("application.dir", "/usr/bin"));

     std::vector<std::string> cmd_args;
-    path.setFileName(serviceFileName());
+    path /= serviceFileName();

     cmd_args.push_back("--http-port");
     cmd_args.push_back(std::to_string(config.getUInt(configPrefix() + ".port", getDefaultPort())));
@@ -126,7 +127,7 @@ std::unique_ptr<ShellCommand> IBridgeHelper::startBridgeCommand() const

     LOG_TRACE(getLog(), "Starting {}", serviceAlias());

-    return ShellCommand::executeDirect(path.toString(), cmd_args, ShellCommandDestructorStrategy(true));
+    return ShellCommand::executeDirect(path.string(), cmd_args, ShellCommandDestructorStrategy(true));
 }

 }
@@ -8,7 +8,6 @@
 #include <IO/WriteBufferFromOStream.h>
 #include <IO/WriteBufferFromString.h>
 #include <Formats/FormatFactory.h>
-#include <Poco/Path.h>
 #include <Poco/Util/AbstractConfiguration.h>
 #include <Common/ShellCommand.h>
 #include <common/logger_useful.h>
@@ -5,10 +5,8 @@
 #include <Interpreters/Context.h>
 #include <Access/AccessType.h>
 #include <Parsers/IdentifierQuotingStyle.h>
-#include <Poco/File.h>
 #include <Poco/Logger.h>
 #include <Poco/Net/HTTPRequest.h>
-#include <Poco/Path.h>
 #include <Poco/URI.h>
 #include <Poco/Util/AbstractConfiguration.h>
 #include <Common/ShellCommand.h>
@@ -62,7 +62,7 @@ static std::string numberFromHost(const std::string & s)

 bool ConfigProcessor::isPreprocessedFile(const std::string & path)
 {
-    return endsWith(Poco::Path(path).getBaseName(), PREPROCESSED_SUFFIX);
+    return endsWith(fs::path(path).stem(), PREPROCESSED_SUFFIX);
 }


@@ -416,34 +416,32 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string &
 {
     Files files;

-    Poco::Path merge_dir_path(config_path);
+    fs::path merge_dir_path(config_path);
     std::set<std::string> merge_dirs;

     /// Add path_to_config/config_name.d dir
-    merge_dir_path.setExtension("d");
-    merge_dirs.insert(merge_dir_path.toString());
+    merge_dir_path.replace_extension("d");
+    merge_dirs.insert(merge_dir_path);
     /// Add path_to_config/conf.d dir
-    merge_dir_path.setBaseName("conf");
-    merge_dirs.insert(merge_dir_path.toString());
+    merge_dir_path.replace_filename("conf.d");
+    merge_dirs.insert(merge_dir_path);

     for (const std::string & merge_dir_name : merge_dirs)
     {
-        Poco::File merge_dir(merge_dir_name);
-        if (!merge_dir.exists() || !merge_dir.isDirectory())
+        if (!fs::exists(merge_dir_name) || !fs::is_directory(merge_dir_name))
             continue;

-        for (Poco::DirectoryIterator it(merge_dir_name); it != Poco::DirectoryIterator(); ++it)
+        for (fs::directory_iterator it(merge_dir_name); it != fs::directory_iterator(); ++it)
         {
-            Poco::File & file = *it;
-            Poco::Path path(file.path());
-            std::string extension = path.getExtension();
-            std::string base_name = path.getBaseName();
+            fs::path path(it->path());
+            std::string extension = path.extension();
+            std::string base_name = path.stem();

             // Skip non-config and temporary files
-            if (file.isFile() && (extension == "xml" || extension == "conf" || extension == "yaml" || extension == "yml") && !startsWith(base_name, "."))
-            {
-                files.push_back(file.path());
-            }
+            if (fs::is_regular_file(path)
+                && (extension == ".xml" || extension == ".conf" || extension == ".yaml" || extension == ".yml")
+                && !startsWith(base_name, "."))
+                files.push_back(it->path());
         }
     }

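One subtlety the rewritten hunk has to absorb: Poco::Path::getExtension() returns the extension without a dot ("xml"), while std::filesystem::path::extension() keeps the leading dot (".xml"), and stem() plays the role of getBaseName(); hence the comparisons against ".xml", ".conf" and so on. A tiny sketch with an illustrative path:

#include <cassert>
#include <filesystem>

namespace fs = std::filesystem;

int main()
{
    fs::path p("/etc/clickhouse-server/config.d/users.xml");
    assert(p.extension() == ".xml");      /// note the leading dot
    assert(p.stem() == "users");          /// file name without the extension
    assert(p.filename() == "users.xml");
}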
@@ -548,7 +546,7 @@ XMLDocumentPtr ConfigProcessor::processConfig(
     else
     {
         std::string default_path = "/etc/metrika.xml";
-        if (Poco::File(default_path).exists())
+        if (fs::exists(default_path))
             include_from_path = default_path;
     }
     if (!include_from_path.empty())
@@ -660,11 +658,11 @@ void ConfigProcessor::savePreprocessedConfig(const LoadedConfig & loaded_config,
     if (!loaded_config.configuration->has("path"))
     {
         // Will use current directory
-        auto parent_path = Poco::Path(loaded_config.config_path).makeParent();
-        preprocessed_dir = parent_path.toString();
-        Poco::Path poco_new_path(new_path);
-        poco_new_path.setBaseName(poco_new_path.getBaseName() + PREPROCESSED_SUFFIX);
-        new_path = poco_new_path.toString();
+        fs::path parent_path = fs::path(loaded_config.config_path).parent_path();
+        preprocessed_dir = parent_path.string();
+        fs::path fs_new_path(new_path);
+        fs_new_path.replace_filename(fs_new_path.stem().string() + PREPROCESSED_SUFFIX + fs_new_path.extension().string());
+        new_path = fs_new_path.string();
     }
     else
     {
@@ -679,9 +677,9 @@ void ConfigProcessor::savePreprocessedConfig(const LoadedConfig & loaded_config,
         }

         preprocessed_path = (fs::path(preprocessed_dir) / fs::path(new_path)).string();
-        auto preprocessed_path_parent = Poco::Path(preprocessed_path).makeParent();
-        if (!preprocessed_path_parent.toString().empty())
-            Poco::File(preprocessed_path_parent).createDirectories();
+        auto preprocessed_path_parent = fs::path(preprocessed_path).parent_path();
+        if (!preprocessed_path_parent.empty())
+            fs::create_directories(preprocessed_path_parent);
     }
     DOMWriter().writeNode(preprocessed_path, loaded_config.preprocessed_xml);
     LOG_DEBUG(log, "Saved preprocessed configuration to '{}'.", preprocessed_path);
@@ -15,12 +15,9 @@
 #include <Poco/DOM/NodeList.h>
 #include <Poco/DOM/NamedNodeMap.h>
 #include <Poco/AutoPtr.h>
-#include <Poco/File.h>
-#include <Poco/Path.h>
 #include <Poco/DirectoryIterator.h>
 #include <Poco/ConsoleChannel.h>
 #include <Poco/Util/AbstractConfiguration.h>

 #include <common/logger_useful.h>


@@ -1,12 +1,15 @@
 #include "ConfigReloader.h"

 #include <Poco/Util/Application.h>
-#include <Poco/File.h>
 #include <common/logger_useful.h>
 #include <Common/setThreadName.h>
 #include "ConfigProcessor.h"
+#include <filesystem>
+#include <Common/filesystemHelpers.h>


+namespace fs = std::filesystem;

 namespace DB
 {

@@ -167,8 +170,8 @@ struct ConfigReloader::FileWithTimestamp

 void ConfigReloader::FilesChangesTracker::addIfExists(const std::string & path_to_add)
 {
-    if (!path_to_add.empty() && Poco::File(path_to_add).exists())
-        files.emplace(path_to_add, Poco::File(path_to_add).getLastModified().epochTime());
+    if (!path_to_add.empty() && fs::exists(path_to_add))
+        files.emplace(path_to_add, FS::getModificationTime(path_to_add));
 }

 bool ConfigReloader::FilesChangesTracker::isDifferOrNewerThan(const FilesChangesTracker & rhs)
@@ -1,8 +1,10 @@
 #include "configReadClient.h"

 #include <Poco/Util/LayeredConfiguration.h>
-#include <Poco/File.h>
 #include "ConfigProcessor.h"
+#include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -11,11 +13,11 @@ bool configReadClient(Poco::Util::LayeredConfiguration & config, const std::stri
     std::string config_path;
     if (config.has("config-file"))
         config_path = config.getString("config-file");
-    else if (Poco::File("./clickhouse-client.xml").exists())
+    else if (fs::exists("./clickhouse-client.xml"))
         config_path = "./clickhouse-client.xml";
-    else if (!home_path.empty() && Poco::File(home_path + "/.clickhouse-client/config.xml").exists())
+    else if (!home_path.empty() && fs::exists(home_path + "/.clickhouse-client/config.xml"))
         config_path = home_path + "/.clickhouse-client/config.xml";
-    else if (Poco::File("/etc/clickhouse-client/config.xml").exists())
+    else if (fs::exists("/etc/clickhouse-client/config.xml"))
         config_path = "/etc/clickhouse-client/config.xml";

     if (!config_path.empty())
@@ -7,7 +7,6 @@
 #include <iostream>
 #include <mutex>

-#include <Poco/File.h>
 #include <Poco/Exception.h>

 #include <IO/ReadBufferFromFileDescriptor.h>
@@ -59,7 +58,7 @@ public:

         Int64 res = -1;

-        bool file_doesnt_exists = !Poco::File(path).exists();
+        bool file_doesnt_exists = !fs::exists(path);
         if (file_doesnt_exists && !create_if_need)
         {
             throw Poco::Exception("File " + path + " does not exist. "
@@ -138,7 +137,7 @@ public:
     // Not thread-safe and not synchronized between processes.
     void fixIfBroken(UInt64 value)
     {
-        bool file_exists = Poco::File(path).exists();
+        bool file_exists = fs::exists(path);

         int fd = ::open(path.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0666);
         if (-1 == fd)
@@ -553,6 +553,7 @@
     M(583, ILLEGAL_PROJECTION) \
     M(584, PROJECTION_NOT_USED) \
     M(585, CANNOT_PARSE_YAML) \
+    M(586, CANNOT_CREATE_FILE) \
     \
     M(998, POSTGRESQL_CONNECTION_FAILURE) \
     M(999, KEEPER_EXCEPTION) \
@@ -21,6 +21,8 @@
 #   include <Common/config_version.h>
 #endif

+namespace fs = std::filesystem;
+
 namespace DB
 {

@@ -177,7 +179,7 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_
     tryLogCurrentExceptionImpl(logger, start_of_message);
 }

-static void getNoSpaceLeftInfoMessage(std::filesystem::path path, std::string & msg)
+static void getNoSpaceLeftInfoMessage(std::filesystem::path path, String & msg)
 {
     path = std::filesystem::absolute(path);
     /// It's possible to get ENOSPC for non existent file (e.g. if there are no free inodes and creat() fails)
@@ -264,23 +266,13 @@ static std::string getExtraExceptionInfo(const std::exception & e)
     String msg;
     try
     {
-        if (const auto * file_exception = dynamic_cast<const Poco::FileException *>(&e))
+        if (const auto * file_exception = dynamic_cast<const fs::filesystem_error *>(&e))
         {
-            if (file_exception->code() == ENOSPC)
-            {
-                /// See Poco::FileImpl::handleLastErrorImpl(...)
-                constexpr const char * expected_error_message = "no space left on device: ";
-                if (startsWith(file_exception->message(), expected_error_message))
-                {
-                    String path = file_exception->message().substr(strlen(expected_error_message));
-                    getNoSpaceLeftInfoMessage(path, msg);
-                }
+            if (file_exception->code() == std::errc::no_space_on_device)
+                getNoSpaceLeftInfoMessage(file_exception->path1(), msg);
             else
-            {
                 msg += "\nCannot print extra info for Poco::Exception";
-            }
-            }
         }
         else if (const auto * errno_exception = dynamic_cast<const DB::ErrnoException *>(&e))
         {
             if (errno_exception->getErrno() == ENOSPC && errno_exception->getPath())
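A self-contained sketch of what the new branch relies on: fs::filesystem_error carries the failing path via path1() and an error_code comparable against std::errc, so the old parsing of Poco::FileException messages is no longer needed. The path below is only an example that is expected to fail:

#include <filesystem>
#include <iostream>
#include <system_error>

namespace fs = std::filesystem;

int main()
{
    try
    {
        fs::create_directory("/proc/this-should-fail/x");   /// illustrative path, expected to throw
    }
    catch (const fs::filesystem_error & e)
    {
        if (e.code() == std::errc::no_space_on_device)
            std::cerr << "no space left on device for " << e.path1() << '\n';
        else
            std::cerr << "filesystem error for " << e.path1() << ": " << e.code().message() << '\n';
    }
}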
@@ -1,10 +1,11 @@
 #pragma once

-#include <Poco/File.h>
 #include <Poco/Timestamp.h>

 #include <string>
+#include <filesystem>
+#include <Common/filesystemHelpers.h>

+namespace fs = std::filesystem;

 class FileUpdatesTracker
 {
@@ -31,6 +32,6 @@ public:
 private:
     Poco::Timestamp getLastModificationTime() const
     {
-        return Poco::File(path).getLastModified();
+        return FS::getModificationTimestamp(path);
     }
 };
@@ -34,7 +34,7 @@ void ProgressIndication::resetProgress()
     write_progress_on_update = false;
 }

-void ProgressIndication::setFileProgressCallback(ContextPtr context, bool write_progress_on_update_)
+void ProgressIndication::setFileProgressCallback(ContextMutablePtr context, bool write_progress_on_update_)
 {
     write_progress_on_update = write_progress_on_update_;
     context->setFileProgressCallback([&](const FileProgress & file_progress)
@@ -36,7 +36,7 @@ public:
     /// In some cases there is a need to update progress value, when there is no access to progress_inidcation object.
     /// In this case it is added via context.
     /// `write_progress_on_update` is needed to write progress for loading files data via pipe in non-interactive mode.
-    void setFileProgressCallback(ContextPtr context, bool write_progress_on_update = false);
+    void setFileProgressCallback(ContextMutablePtr context, bool write_progress_on_update = false);

     /// How much seconds passed since query execution start.
     UInt64 elapsedSeconds() const { return watch.elapsedSeconds(); }
@@ -4,7 +4,6 @@
 #include <fcntl.h>
 #include <errno.h>

-#include <Poco/File.h>
 #include <common/logger_useful.h>
 #include <common/errnoToString.h>
 #include <Common/ClickHouseRevision.h>
@@ -14,7 +13,9 @@
 #include <IO/LimitReadBuffer.h>
 #include <IO/WriteBufferFromFileDescriptor.h>
 #include <IO/Operators.h>
+#include <filesystem>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -45,7 +46,7 @@ StatusFile::StatusFile(std::string path_, FillFunction fill_)
     : path(std::move(path_)), fill(std::move(fill_))
 {
     /// If file already exists. NOTE Minor race condition.
-    if (Poco::File(path).exists())
+    if (fs::exists(path))
     {
         std::string contents;
         {
@@ -4,6 +4,7 @@
 #include "TestKeeper.h"

 #include <functional>
+#include <filesystem>
 #include <pcg-random/pcg_random.hpp>

 #include <common/logger_useful.h>
@@ -17,6 +18,7 @@

 #define ZOOKEEPER_CONNECTION_TIMEOUT_MS 1000

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -612,7 +614,7 @@ void ZooKeeper::removeChildren(const std::string & path)
         Coordination::Requests ops;
         for (size_t i = 0; i < MULTI_BATCH_SIZE && !children.empty(); ++i)
         {
-            ops.emplace_back(makeRemoveRequest(path + "/" + children.back(), -1));
+            ops.emplace_back(makeRemoveRequest(fs::path(path) / children.back(), -1));
             children.pop_back();
         }
         multi(ops);
@@ -628,9 +630,9 @@ void ZooKeeper::removeChildrenRecursive(const std::string & path, const String &
         Coordination::Requests ops;
         for (size_t i = 0; i < MULTI_BATCH_SIZE && !children.empty(); ++i)
         {
-            removeChildrenRecursive(path + "/" + children.back());
+            removeChildrenRecursive(fs::path(path) / children.back());
             if (likely(keep_child_node.empty() || keep_child_node != children.back()))
-                ops.emplace_back(makeRemoveRequest(path + "/" + children.back(), -1));
+                ops.emplace_back(makeRemoveRequest(fs::path(path) / children.back(), -1));
             children.pop_back();
         }
         multi(ops);
@@ -648,7 +650,7 @@ void ZooKeeper::tryRemoveChildrenRecursive(const std::string & path, const Strin
         Strings batch;
         for (size_t i = 0; i < MULTI_BATCH_SIZE && !children.empty(); ++i)
         {
-            String child_path = path + "/" + children.back();
+            String child_path = fs::path(path) / children.back();
             tryRemoveChildrenRecursive(child_path);
             if (likely(keep_child_node.empty() || keep_child_node != children.back()))
             {
@ -6,10 +6,15 @@
|
|||||||
# include <mntent.h>
|
# include <mntent.h>
|
||||||
#endif
|
#endif
|
||||||
#include <cerrno>
|
#include <cerrno>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Poco/Version.h>
|
#include <Poco/Version.h>
|
||||||
|
#include <Poco/Timestamp.h>
|
||||||
|
#include <filesystem>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <utime.h>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -20,6 +25,8 @@ namespace ErrorCodes
|
|||||||
extern const int SYSTEM_ERROR;
|
extern const int SYSTEM_ERROR;
|
||||||
extern const int NOT_IMPLEMENTED;
|
extern const int NOT_IMPLEMENTED;
|
||||||
extern const int CANNOT_STATVFS;
|
extern const int CANNOT_STATVFS;
|
||||||
|
extern const int PATH_ACCESS_DENIED;
|
||||||
|
extern const int CANNOT_CREATE_FILE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -38,17 +45,13 @@ struct statvfs getStatVFS(const String & path)
|
|||||||
|
|
||||||
bool enoughSpaceInDirectory(const std::string & path [[maybe_unused]], size_t data_size [[maybe_unused]])
|
bool enoughSpaceInDirectory(const std::string & path [[maybe_unused]], size_t data_size [[maybe_unused]])
|
||||||
{
|
{
|
||||||
#if POCO_VERSION >= 0x01090000
|
auto free_space = fs::space(path).free;
|
||||||
auto free_space = Poco::File(path).freeSpace();
|
|
||||||
return data_size <= free_space;
|
return data_size <= free_space;
|
||||||
#else
|
|
||||||
return true;
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<TemporaryFile> createTemporaryFile(const std::string & path)
|
std::unique_ptr<TemporaryFile> createTemporaryFile(const std::string & path)
|
||||||
{
|
{
|
||||||
Poco::File(path).createDirectories();
|
fs::create_directories(path);
|
||||||
|
|
||||||
/// NOTE: std::make_shared cannot use protected constructors
|
/// NOTE: std::make_shared cannot use protected constructors
|
||||||
return std::make_unique<TemporaryFile>(path);
|
return std::make_unique<TemporaryFile>(path);
|
||||||
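The freeSpace() call above becomes fs::space(), which reports capacity, free and available bytes in a single call (backed by statvfs on POSIX), so the old POCO_VERSION guard is no longer needed. A small sketch of the same check, assuming an existing directory path (illustrative only):

#include <cstdint>
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

/// Returns true when the filesystem holding `dir` has at least `bytes` free.
/// Mirrors the enoughSpaceInDirectory() logic above; `dir` must exist.
bool hasFreeSpace(const fs::path & dir, std::uintmax_t bytes)
{
    std::error_code ec;
    const fs::space_info info = fs::space(dir, ec);
    if (ec)
        return false;   /// treat errors as "not enough space"
    return bytes <= info.free;
}

int main()
{
    std::cout << hasFreeSpace("/tmp", 1 << 20) << '\n';
}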
@ -128,3 +131,73 @@ bool pathStartsWith(const String & path, const String & prefix_path)
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/// Copied from Poco::File
|
||||||
|
namespace FS
|
||||||
|
{
|
||||||
|
|
||||||
|
bool createFile(const std::string & path)
|
||||||
|
{
|
||||||
|
int n = open(path.c_str(), O_WRONLY | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
|
||||||
|
if (n != -1)
|
||||||
|
{
|
||||||
|
close(n);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
DB::throwFromErrnoWithPath("Cannot create file: " + path, path, DB::ErrorCodes::CANNOT_CREATE_FILE);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool canRead(const std::string & path)
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (stat(path.c_str(), &st) == 0)
|
||||||
|
{
|
||||||
|
if (st.st_uid == geteuid())
|
||||||
|
return (st.st_mode & S_IRUSR) != 0;
|
||||||
|
else if (st.st_gid == getegid())
|
||||||
|
return (st.st_mode & S_IRGRP) != 0;
|
||||||
|
else
|
||||||
|
return (st.st_mode & S_IROTH) != 0 || geteuid() == 0;
|
||||||
|
}
|
||||||
|
DB::throwFromErrnoWithPath("Cannot check read access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool canWrite(const std::string & path)
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (stat(path.c_str(), &st) == 0)
|
||||||
|
{
|
||||||
|
if (st.st_uid == geteuid())
|
||||||
|
return (st.st_mode & S_IWUSR) != 0;
|
||||||
|
else if (st.st_gid == getegid())
|
||||||
|
return (st.st_mode & S_IWGRP) != 0;
|
||||||
|
else
|
||||||
|
return (st.st_mode & S_IWOTH) != 0 || geteuid() == 0;
|
||||||
|
}
|
||||||
|
DB::throwFromErrnoWithPath("Cannot check write access to file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
|
||||||
|
}
|
||||||
|
|
||||||
|
time_t getModificationTime(const std::string & path)
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (stat(path.c_str(), &st) == 0)
|
||||||
|
return st.st_mtime;
|
||||||
|
DB::throwFromErrnoWithPath("Cannot check modification time for file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
|
||||||
|
}
|
||||||
|
|
||||||
|
Poco::Timestamp getModificationTimestamp(const std::string & path)
|
||||||
|
{
|
||||||
|
return Poco::Timestamp::fromEpochTime(getModificationTime(path));
|
||||||
|
}
|
||||||
|
|
||||||
|
void setModificationTime(const std::string & path, time_t time)
|
||||||
|
{
|
||||||
|
struct utimbuf tb;
|
||||||
|
tb.actime = time;
|
||||||
|
tb.modtime = time;
|
||||||
|
if (utime(path.c_str(), &tb) != 0)
|
||||||
|
DB::throwFromErrnoWithPath("Cannot set modification time for file: " + path, path, DB::ErrorCodes::PATH_ACCESS_DENIED);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
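The FS helpers added above exist because std::filesystem has no direct equivalent for create-exclusive file creation, euid/egid-aware access checks, or time_t modification times. FS::createFile() mirrors Poco::File::createFile(): it relies on open(O_CREAT | O_EXCL) creating the file atomically and failing with EEXIST if it already exists. A standalone sketch of that primitive (the flag path is made up for illustration):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    const char * flag = "/tmp/example.sql.detached";   /// illustrative path only

    /// O_EXCL makes creation atomic: either this process creates the flag,
    /// or open() fails with EEXIST because it is already there.
    int fd = ::open(flag, O_WRONLY | O_CREAT | O_EXCL, 0666);
    if (fd == -1)
    {
        std::perror("open");   /// EEXIST if another process created it first
        return 1;
    }
    ::close(fd);
    return 0;
}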
@ -36,3 +36,15 @@ bool pathStartsWith(const std::filesystem::path & path, const std::filesystem::p
|
|||||||
bool pathStartsWith(const String & path, const String & prefix_path);
|
bool pathStartsWith(const String & path, const String & prefix_path);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace FS
|
||||||
|
{
|
||||||
|
bool createFile(const std::string & path);
|
||||||
|
|
||||||
|
bool canRead(const std::string & path);
|
||||||
|
bool canWrite(const std::string & path);
|
||||||
|
|
||||||
|
time_t getModificationTime(const std::string & path);
|
||||||
|
Poco::Timestamp getModificationTimestamp(const std::string & path);
|
||||||
|
void setModificationTime(const std::string & path, time_t time);
|
||||||
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#include <Common/renameat2.h>
|
#include <Common/renameat2.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Poco/File.h>
|
#include <filesystem>
|
||||||
|
|
||||||
#if defined(linux) || defined(__linux) || defined(__linux__)
|
#if defined(linux) || defined(__linux) || defined(__linux__)
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
@ -10,6 +10,8 @@
|
|||||||
#include <sys/utsname.h>
|
#include <sys/utsname.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -93,9 +95,9 @@ static bool renameat2(const std::string &, const std::string &, int)
|
|||||||
static void renameNoReplaceFallback(const std::string & old_path, const std::string & new_path)
|
static void renameNoReplaceFallback(const std::string & old_path, const std::string & new_path)
|
||||||
{
|
{
|
||||||
/// NOTE it's unsafe
|
/// NOTE it's unsafe
|
||||||
if (Poco::File{new_path}.exists())
|
if (fs::exists(new_path))
|
||||||
throw Exception("File " + new_path + " exists", ErrorCodes::FILE_ALREADY_EXISTS);
|
throw Exception("File " + new_path + " exists", ErrorCodes::FILE_ALREADY_EXISTS);
|
||||||
Poco::File{old_path}.renameTo(new_path);
|
fs::rename(old_path, new_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Do not use [[noreturn]] to avoid warnings like "code will never be executed" in other places
|
/// Do not use [[noreturn]] to avoid warnings like "code will never be executed" in other places
|
||||||
|
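The fallback above keeps the original "NOTE it's unsafe" caveat: fs::exists() followed by fs::rename() is not atomic, and on POSIX fs::rename() silently replaces an existing destination, which is exactly the window renameat2(RENAME_NOREPLACE) closes. A standalone sketch of both behaviours (paths are illustrative):

#include <filesystem>
#include <fstream>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    std::ofstream("/tmp/a.txt") << "old";
    std::ofstream("/tmp/b.txt") << "new";

    /// Non-atomic check-then-rename: another process could create b.txt
    /// between these two lines, and rename() would overwrite it anyway.
    if (!fs::exists("/tmp/b.txt"))
        fs::rename("/tmp/a.txt", "/tmp/b.txt");
    else
        std::cout << "destination already exists\n";

    /// fs::rename() itself follows POSIX rename(): an existing destination
    /// file is replaced without any error.
    fs::rename("/tmp/a.txt", "/tmp/b.txt");
    std::cout << std::ifstream("/tmp/b.txt").rdbuf() << '\n';   /// now holds "old"
}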
@ -1,7 +1,5 @@
|
|||||||
#include <Databases/DatabaseAtomic.h>
|
#include <Databases/DatabaseAtomic.h>
|
||||||
#include <Databases/DatabaseOnDisk.h>
|
#include <Databases/DatabaseOnDisk.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <IO/ReadBufferFromFile.h>
|
#include <IO/ReadBufferFromFile.h>
|
||||||
@ -13,6 +11,8 @@
|
|||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <Interpreters/DDLTask.h>
|
#include <Interpreters/DDLTask.h>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
@ -37,12 +37,12 @@ public:
|
|||||||
|
|
||||||
DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, const String & logger_name, ContextPtr context_)
|
DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, const String & logger_name, ContextPtr context_)
|
||||||
: DatabaseOrdinary(name_, std::move(metadata_path_), "store/", logger_name, context_)
|
: DatabaseOrdinary(name_, std::move(metadata_path_), "store/", logger_name, context_)
|
||||||
, path_to_table_symlinks(getContext()->getPath() + "data/" + escapeForFileName(name_) + "/")
|
, path_to_table_symlinks(fs::path(getContext()->getPath()) / "data" / escapeForFileName(name_) / "")
|
||||||
, path_to_metadata_symlink(getContext()->getPath() + "metadata/" + escapeForFileName(name_))
|
, path_to_metadata_symlink(fs::path(getContext()->getPath()) / "metadata" / escapeForFileName(name_))
|
||||||
, db_uuid(uuid)
|
, db_uuid(uuid)
|
||||||
{
|
{
|
||||||
assert(db_uuid != UUIDHelpers::Nil);
|
assert(db_uuid != UUIDHelpers::Nil);
|
||||||
Poco::File(path_to_table_symlinks).createDirectories();
|
fs::create_directories(path_to_table_symlinks);
|
||||||
tryCreateMetadataSymlink();
|
tryCreateMetadataSymlink();
|
||||||
}
|
}
|
||||||
|
|
||||||
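The trailing / "" in path_to_table_symlinks above is how fs::path reproduces the trailing slash that the old string concatenation produced: appending an empty component simply appends a directory separator. A tiny check (illustrative paths):

#include <cassert>
#include <filesystem>

namespace fs = std::filesystem;

int main()
{
    fs::path with_slash = fs::path("/var/lib/clickhouse") / "data" / "db" / "";
    fs::path without    = fs::path("/var/lib/clickhouse") / "data" / "db";

    assert(with_slash.string() == "/var/lib/clickhouse/data/db/");
    assert(without.string()    == "/var/lib/clickhouse/data/db");
}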
@ -73,14 +73,14 @@ void DatabaseAtomic::drop(ContextPtr)
|
|||||||
assert(tables.empty());
|
assert(tables.empty());
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
Poco::File(path_to_metadata_symlink).remove();
|
fs::remove(path_to_metadata_symlink);
|
||||||
Poco::File(path_to_table_symlinks).remove(true);
|
fs::remove_all(path_to_table_symlinks);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
LOG_WARNING(log, getCurrentExceptionMessage(true));
|
||||||
}
|
}
|
||||||
Poco::File(getMetadataPath()).remove(true);
|
fs::remove_all(getMetadataPath());
|
||||||
}
|
}
|
||||||
|
|
||||||
void DatabaseAtomic::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path)
|
void DatabaseAtomic::attachTable(const String & name, const StoragePtr & table, const String & relative_table_path)
|
||||||
@ -132,7 +132,7 @@ void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_na
|
|||||||
/// (it's more likely to lose the connection than to fail before applying local changes).
|
/// (it's more likely to lose the connection than to fail before applying local changes).
|
||||||
/// TODO better detection and recovery
|
/// TODO better detection and recovery
|
||||||
|
|
||||||
Poco::File(table_metadata_path).renameTo(table_metadata_path_drop); /// Mark table as dropped
|
fs::rename(table_metadata_path, table_metadata_path_drop); /// Mark table as dropped
|
||||||
DatabaseOrdinary::detachTableUnlocked(table_name, lock); /// Should never throw
|
DatabaseOrdinary::detachTableUnlocked(table_name, lock); /// Should never throw
|
||||||
table_name_to_path.erase(table_name);
|
table_name_to_path.erase(table_name);
|
||||||
}
|
}
|
||||||
@ -316,7 +316,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
Poco::File(table_metadata_tmp_path).remove();
|
fs::remove(table_metadata_tmp_path);
|
||||||
if (locked_uuid)
|
if (locked_uuid)
|
||||||
DatabaseCatalog::instance().removeUUIDMappingFinally(query.uuid);
|
DatabaseCatalog::instance().removeUUIDMappingFinally(query.uuid);
|
||||||
throw;
|
throw;
|
||||||
@ -420,7 +420,7 @@ void DatabaseAtomic::loadStoredObjects(ContextMutablePtr local_context, bool has
|
|||||||
{
|
{
|
||||||
/// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken
|
/// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken
|
||||||
if (has_force_restore_data_flag)
|
if (has_force_restore_data_flag)
|
||||||
Poco::File(path_to_table_symlinks).remove(true);
|
fs::remove_all(path_to_table_symlinks);
|
||||||
|
|
||||||
DatabaseOrdinary::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach);
|
DatabaseOrdinary::loadStoredObjects(local_context, has_force_restore_data_flag, force_attach);
|
||||||
|
|
||||||
@ -432,7 +432,7 @@ void DatabaseAtomic::loadStoredObjects(ContextMutablePtr local_context, bool has
|
|||||||
table_names = table_name_to_path;
|
table_names = table_name_to_path;
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::File(path_to_table_symlinks).createDirectories();
|
fs::create_directories(path_to_table_symlinks);
|
||||||
for (const auto & table : table_names)
|
for (const auto & table : table_names)
|
||||||
tryCreateSymlink(table.first, table.second, true);
|
tryCreateSymlink(table.first, table.second, true);
|
||||||
}
|
}
|
||||||
@ -443,9 +443,9 @@ void DatabaseAtomic::tryCreateSymlink(const String & table_name, const String &
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
String link = path_to_table_symlinks + escapeForFileName(table_name);
|
String link = path_to_table_symlinks + escapeForFileName(table_name);
|
||||||
Poco::File data = Poco::Path(getContext()->getPath()).makeAbsolute().toString() + actual_data_path;
|
fs::path data = fs::canonical(getContext()->getPath()) / actual_data_path;
|
||||||
if (!if_data_path_exist || data.exists())
|
if (!if_data_path_exist || fs::exists(data))
|
||||||
data.linkTo(link, Poco::File::LINK_SYMBOLIC);
|
fs::create_directory_symlink(data, link);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
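Two details of the replacement above are worth noting: fs::canonical() is stricter than the old makeAbsolute() because it resolves symlinks and ".." and requires the path to exist, and fs::create_directory_symlink(target, link) takes the target first and the link second. A small sketch assuming /tmp is writable (paths illustrative):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    fs::create_directories("/tmp/example_store/data");

    /// canonical() throws if the path does not exist; it also resolves symlinks and "..".
    fs::path data = fs::canonical("/tmp/example_store") / "data";

    fs::path link = "/tmp/example_link";
    fs::remove(link);                              /// ignore the result; remove() is not recursive
    fs::create_directory_symlink(data, link);      /// argument order: target, then link

    std::cout << fs::read_symlink(link) << '\n';   /// prints the symlink target, quoted
}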
@ -458,7 +458,7 @@ void DatabaseAtomic::tryRemoveSymlink(const String & table_name)
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
String path = path_to_table_symlinks + escapeForFileName(table_name);
|
String path = path_to_table_symlinks + escapeForFileName(table_name);
|
||||||
Poco::File{path}.remove();
|
fs::remove(path);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -471,17 +471,17 @@ void DatabaseAtomic::tryCreateMetadataSymlink()
|
|||||||
/// Symlinks in data/db_name/ directory and metadata/db_name/ are not used by ClickHouse,
|
/// Symlinks in data/db_name/ directory and metadata/db_name/ are not used by ClickHouse,
|
||||||
/// it's needed only for convenient introspection.
|
/// it's needed only for convenient introspection.
|
||||||
assert(path_to_metadata_symlink != metadata_path);
|
assert(path_to_metadata_symlink != metadata_path);
|
||||||
Poco::File metadata_symlink(path_to_metadata_symlink);
|
fs::path metadata_symlink(path_to_metadata_symlink);
|
||||||
if (metadata_symlink.exists())
|
if (fs::exists(metadata_symlink))
|
||||||
{
|
{
|
||||||
if (!metadata_symlink.isLink())
|
if (!fs::is_symlink(metadata_symlink))
|
||||||
throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Directory {} exists", path_to_metadata_symlink);
|
throw Exception(ErrorCodes::FILE_ALREADY_EXISTS, "Directory {} exists", path_to_metadata_symlink);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
Poco::File{metadata_path}.linkTo(path_to_metadata_symlink, Poco::File::LINK_SYMBOLIC);
|
fs::create_directory_symlink(metadata_path, path_to_metadata_symlink);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -495,7 +495,7 @@ void DatabaseAtomic::renameDatabase(const String & new_name)
|
|||||||
/// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard
|
/// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
Poco::File(path_to_metadata_symlink).remove();
|
fs::remove(path_to_metadata_symlink);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -526,7 +526,7 @@ void DatabaseAtomic::renameDatabase(const String & new_name)
|
|||||||
path_to_table_symlinks = getContext()->getPath() + "data/" + new_name_escaped + "/";
|
path_to_table_symlinks = getContext()->getPath() + "data/" + new_name_escaped + "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::File(old_path_to_table_symlinks).renameTo(path_to_table_symlinks);
|
fs::rename(old_path_to_table_symlinks, path_to_table_symlinks);
|
||||||
tryCreateMetadataSymlink();
|
tryCreateMetadataSymlink();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -11,10 +11,9 @@
|
|||||||
#include <Parsers/ASTIdentifier.h>
|
#include <Parsers/ASTIdentifier.h>
|
||||||
#include <Parsers/ASTLiteral.h>
|
#include <Parsers/ASTLiteral.h>
|
||||||
#include <Parsers/formatAST.h>
|
#include <Parsers/formatAST.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Common/Macros.h>
|
#include <Common/Macros.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
# include "config_core.h"
|
# include "config_core.h"
|
||||||
@ -40,6 +39,8 @@
|
|||||||
#include <Storages/PostgreSQL/PoolWithFailover.h>
|
#include <Storages/PostgreSQL/PoolWithFailover.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -58,11 +59,12 @@ DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & m
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
/// Creates store/xxx/ for Atomic
|
/// Creates store/xxx/ for Atomic
|
||||||
Poco::File(Poco::Path(metadata_path).makeParent()).createDirectories();
|
fs::create_directories(fs::path(metadata_path).parent_path());
|
||||||
|
|
||||||
/// Before 20.7 it's possible that .sql metadata file does not exist for some old database.
|
/// Before 20.7 it's possible that .sql metadata file does not exist for some old database.
|
||||||
/// In this case Ordinary database is created on server startup if the corresponding metadata directory exists.
|
/// In this case Ordinary database is created on server startup if the corresponding metadata directory exists.
|
||||||
/// So we should remove metadata directory if database creation failed.
|
/// So we should remove metadata directory if database creation failed.
|
||||||
created = Poco::File(metadata_path).createDirectory();
|
created = fs::create_directory(metadata_path);
|
||||||
|
|
||||||
DatabasePtr impl = getImpl(create, metadata_path, context);
|
DatabasePtr impl = getImpl(create, metadata_path, context);
|
||||||
|
|
||||||
@ -74,11 +76,8 @@ DatabasePtr DatabaseFactory::get(const ASTCreateQuery & create, const String & m
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
Poco::File metadata_dir(metadata_path);
|
if (created && fs::exists(metadata_path))
|
||||||
|
fs::remove_all(metadata_path);
|
||||||
if (created && metadata_dir.exists())
|
|
||||||
metadata_dir.remove(true);
|
|
||||||
|
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
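The created = fs::create_directory(metadata_path) line above preserves the old Poco semantics that the cleanup in the catch block depends on: the call returns true only when the directory was actually created, returns false when it already existed, and throws on real errors. A short sketch (illustrative path):

#include <cassert>
#include <filesystem>

namespace fs = std::filesystem;

int main()
{
    const fs::path dir = "/tmp/example_metadata";
    fs::remove_all(dir);

    assert(fs::create_directory(dir) == true);    /// newly created
    assert(fs::create_directory(dir) == false);   /// already there: no error, returns false

    /// create_directories() creates missing parents as well, which is why it is
    /// used for the ".../store/xxx/" parent directory above.
    assert(fs::create_directories(dir / "a" / "b") == true);
}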
@ -12,8 +12,9 @@
|
|||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
#include <ext/scope_guard_safe.h>
|
#include <ext/scope_guard_safe.h>
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
#include <Poco/File.h>
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -43,8 +44,8 @@ void DatabaseLazy::loadStoredObjects(
|
|||||||
{
|
{
|
||||||
const std::string table_name = file_name.substr(0, file_name.size() - 4);
|
const std::string table_name = file_name.substr(0, file_name.size() - 4);
|
||||||
|
|
||||||
auto detached_permanently_flag = Poco::File(getMetadataPath() + "/" + file_name + detached_suffix);
|
fs::path detached_permanently_flag = fs::path(getMetadataPath()) / (file_name + detached_suffix);
|
||||||
if (detached_permanently_flag.exists())
|
if (fs::exists(detached_permanently_flag))
|
||||||
{
|
{
|
||||||
LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name));
|
LOG_DEBUG(log, "Skipping permanently detached table {}.", backQuote(table_name));
|
||||||
return;
|
return;
|
||||||
@ -228,7 +229,7 @@ StoragePtr DatabaseLazy::loadTable(const String & table_name) const
|
|||||||
|
|
||||||
LOG_DEBUG(log, "Load table {} to cache.", backQuote(table_name));
|
LOG_DEBUG(log, "Load table {} to cache.", backQuote(table_name));
|
||||||
|
|
||||||
const String table_metadata_path = getMetadataPath() + "/" + escapeForFileName(table_name) + ".sql";
|
const String table_metadata_path = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + ".sql");
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
|
@ -4,9 +4,9 @@
|
|||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Parsers/ASTCreateQuery.h>
|
#include <Parsers/ASTCreateQuery.h>
|
||||||
#include <Storages/IStorage.h>
|
#include <Storages/IStorage.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -42,9 +42,9 @@ void DatabaseMemory::dropTable(
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
table->drop();
|
table->drop();
|
||||||
Poco::File table_data_dir{getTableDataPath(table_name)};
|
fs::path table_data_dir{getTableDataPath(table_name)};
|
||||||
if (table_data_dir.exists())
|
if (fs::exists(table_data_dir))
|
||||||
table_data_dir.remove(true);
|
fs::remove_all(table_data_dir);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
|
@ -14,14 +14,14 @@
|
|||||||
#include <Storages/StorageFactory.h>
|
#include <Storages/StorageFactory.h>
|
||||||
#include <TableFunctions/TableFunctionFactory.h>
|
#include <TableFunctions/TableFunctionFactory.h>
|
||||||
#include <Common/escapeForFileName.h>
|
#include <Common/escapeForFileName.h>
|
||||||
|
|
||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
|
|
||||||
#include <Databases/DatabaseOrdinary.h>
|
#include <Databases/DatabaseOrdinary.h>
|
||||||
#include <Databases/DatabaseAtomic.h>
|
#include <Databases/DatabaseAtomic.h>
|
||||||
#include <Common/assert_cast.h>
|
#include <Common/assert_cast.h>
|
||||||
|
#include <filesystem>
|
||||||
|
#include <Common/filesystemHelpers.h>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -201,8 +201,8 @@ DatabaseOnDisk::DatabaseOnDisk(
|
|||||||
, metadata_path(metadata_path_)
|
, metadata_path(metadata_path_)
|
||||||
, data_path(data_path_)
|
, data_path(data_path_)
|
||||||
{
|
{
|
||||||
Poco::File(local_context->getPath() + data_path).createDirectories();
|
fs::create_directories(local_context->getPath() + data_path);
|
||||||
Poco::File(metadata_path).createDirectories();
|
fs::create_directories(metadata_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -245,7 +245,7 @@ void DatabaseOnDisk::createTable(
|
|||||||
if (!create.attach)
|
if (!create.attach)
|
||||||
checkMetadataFilenameAvailability(table_name);
|
checkMetadataFilenameAvailability(table_name);
|
||||||
|
|
||||||
if (create.attach && Poco::File(table_metadata_path).exists())
|
if (create.attach && fs::exists(table_metadata_path))
|
||||||
{
|
{
|
||||||
ASTPtr ast_detached = parseQueryFromMetadata(log, local_context, table_metadata_path);
|
ASTPtr ast_detached = parseQueryFromMetadata(log, local_context, table_metadata_path);
|
||||||
auto & create_detached = ast_detached->as<ASTCreateQuery &>();
|
auto & create_detached = ast_detached->as<ASTCreateQuery &>();
|
||||||
@ -285,10 +285,10 @@ void DatabaseOnDisk::removeDetachedPermanentlyFlag(ContextPtr, const String & ta
|
|||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
auto detached_permanently_flag = Poco::File(table_metadata_path + detached_suffix);
|
fs::path detached_permanently_flag(table_metadata_path + detached_suffix);
|
||||||
|
|
||||||
if (detached_permanently_flag.exists())
|
if (fs::exists(detached_permanently_flag))
|
||||||
detached_permanently_flag.remove();
|
fs::remove(detached_permanently_flag);
|
||||||
}
|
}
|
||||||
catch (Exception & e)
|
catch (Exception & e)
|
||||||
{
|
{
|
||||||
@ -308,11 +308,11 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
|
|||||||
|
|
||||||
/// If it was ATTACH query and file with table metadata already exist
|
/// If it was ATTACH query and file with table metadata already exist
|
||||||
/// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one.
|
/// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one.
|
||||||
Poco::File(table_metadata_tmp_path).renameTo(table_metadata_path);
|
fs::rename(table_metadata_tmp_path, table_metadata_path);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
Poco::File(table_metadata_tmp_path).remove();
|
fs::remove(table_metadata_tmp_path);
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -321,10 +321,10 @@ void DatabaseOnDisk::detachTablePermanently(ContextPtr, const String & table_nam
|
|||||||
{
|
{
|
||||||
auto table = detachTable(table_name);
|
auto table = detachTable(table_name);
|
||||||
|
|
||||||
Poco::File detached_permanently_flag(getObjectMetadataPath(table_name) + detached_suffix);
|
fs::path detached_permanently_flag(getObjectMetadataPath(table_name) + detached_suffix);
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
detached_permanently_flag.createFile();
|
FS::createFile(detached_permanently_flag);
|
||||||
}
|
}
|
||||||
catch (Exception & e)
|
catch (Exception & e)
|
||||||
{
|
{
|
||||||
@ -350,25 +350,25 @@ void DatabaseOnDisk::dropTable(ContextPtr local_context, const String & table_na
|
|||||||
bool renamed = false;
|
bool renamed = false;
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
Poco::File(table_metadata_path).renameTo(table_metadata_path_drop);
|
fs::rename(table_metadata_path, table_metadata_path_drop);
|
||||||
renamed = true;
|
renamed = true;
|
||||||
table->drop();
|
table->drop();
|
||||||
table->is_dropped = true;
|
table->is_dropped = true;
|
||||||
|
|
||||||
Poco::File table_data_dir{local_context->getPath() + table_data_path_relative};
|
fs::path table_data_dir(local_context->getPath() + table_data_path_relative);
|
||||||
if (table_data_dir.exists())
|
if (fs::exists(table_data_dir))
|
||||||
table_data_dir.remove(true);
|
fs::remove_all(table_data_dir);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__));
|
LOG_WARNING(log, getCurrentExceptionMessage(__PRETTY_FUNCTION__));
|
||||||
attachTable(table_name, table, table_data_path_relative);
|
attachTable(table_name, table, table_data_path_relative);
|
||||||
if (renamed)
|
if (renamed)
|
||||||
Poco::File(table_metadata_path_drop).renameTo(table_metadata_path);
|
fs::rename(table_metadata_path_drop, table_metadata_path);
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::File(table_metadata_path_drop).remove();
|
fs::remove(table_metadata_path_drop);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DatabaseOnDisk::checkMetadataFilenameAvailability(const String & to_table_name) const
|
void DatabaseOnDisk::checkMetadataFilenameAvailability(const String & to_table_name) const
|
||||||
@ -381,11 +381,11 @@ void DatabaseOnDisk::checkMetadataFilenameAvailabilityUnlocked(const String & to
|
|||||||
{
|
{
|
||||||
String table_metadata_path = getObjectMetadataPath(to_table_name);
|
String table_metadata_path = getObjectMetadataPath(to_table_name);
|
||||||
|
|
||||||
if (Poco::File(table_metadata_path).exists())
|
if (fs::exists(table_metadata_path))
|
||||||
{
|
{
|
||||||
auto detached_permanently_flag = Poco::File(table_metadata_path + detached_suffix);
|
fs::path detached_permanently_flag(table_metadata_path + detached_suffix);
|
||||||
|
|
||||||
if (detached_permanently_flag.exists())
|
if (fs::exists(detached_permanently_flag))
|
||||||
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached permanently)", backQuote(database_name), backQuote(to_table_name));
|
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached permanently)", backQuote(database_name), backQuote(to_table_name));
|
||||||
else
|
else
|
||||||
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached)", backQuote(database_name), backQuote(to_table_name));
|
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists (detached)", backQuote(database_name), backQuote(to_table_name));
|
||||||
@ -463,7 +463,7 @@ void DatabaseOnDisk::renameTable(
|
|||||||
/// Now table data are moved to new database, so we must add metadata and attach table to new database
|
/// Now table data are moved to new database, so we must add metadata and attach table to new database
|
||||||
to_database.createTable(local_context, to_table_name, table, attach_query);
|
to_database.createTable(local_context, to_table_name, table, attach_query);
|
||||||
|
|
||||||
Poco::File(table_metadata_path).remove();
|
fs::remove(table_metadata_path);
|
||||||
|
|
||||||
if (from_atomic_to_ordinary)
|
if (from_atomic_to_ordinary)
|
||||||
{
|
{
|
||||||
@ -528,8 +528,8 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
|
|||||||
void DatabaseOnDisk::drop(ContextPtr local_context)
|
void DatabaseOnDisk::drop(ContextPtr local_context)
|
||||||
{
|
{
|
||||||
assert(tables.empty());
|
assert(tables.empty());
|
||||||
Poco::File(local_context->getPath() + getDataPath()).remove(false);
|
fs::remove(local_context->getPath() + getDataPath());
|
||||||
Poco::File(getMetadataPath()).remove(false);
|
fs::remove(getMetadataPath());
|
||||||
}
|
}
|
||||||
|
|
||||||
String DatabaseOnDisk::getObjectMetadataPath(const String & object_name) const
|
String DatabaseOnDisk::getObjectMetadataPath(const String & object_name) const
|
||||||
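The mapping used throughout this commit: Poco::File::remove() becomes fs::remove() (one file or one empty directory) and Poco::File::remove(true) becomes fs::remove_all() (recursive). fs::remove() on a non-empty directory fails, which keeps drop()'s assumption that the data and metadata directories are already empty. A sketch of the difference (illustrative layout):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    fs::create_directories("/tmp/example_db/table");

    std::error_code ec;
    fs::remove("/tmp/example_db", ec);   /// non-empty directory: fails (throws without the error_code)
    std::cout << "remove:     " << ec.message() << '\n';

    std::cout << "remove_all: " << fs::remove_all("/tmp/example_db")   /// recursive; returns a count
              << " entries deleted\n";
}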
@ -540,10 +540,9 @@ String DatabaseOnDisk::getObjectMetadataPath(const String & object_name) const
|
|||||||
time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_name) const
|
time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_name) const
|
||||||
{
|
{
|
||||||
String table_metadata_path = getObjectMetadataPath(object_name);
|
String table_metadata_path = getObjectMetadataPath(object_name);
|
||||||
Poco::File meta_file(table_metadata_path);
|
|
||||||
|
|
||||||
if (meta_file.exists())
|
if (fs::exists(table_metadata_path))
|
||||||
return meta_file.getLastModified().epochTime();
|
return FS::getModificationTime(table_metadata_path);
|
||||||
else
|
else
|
||||||
return static_cast<time_t>(0);
|
return static_cast<time_t>(0);
|
||||||
}
|
}
|
||||||
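The switch to FS::getModificationTime() above sidesteps an awkward corner of C++17: fs::last_write_time() returns fs::file_time_type on an unspecified clock, and there is no portable C++17 way to turn it into the time_t this interface needs (std::chrono::clock_cast only arrives in C++20), so a stat()-based helper is simpler. A sketch of the approximate conversion it avoids (assumes the two clocks can be related through system_clock):

#include <chrono>
#include <ctime>
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

/// Approximate C++17 conversion of fs::file_time_type to time_t.
/// Not exact: the two "now" calls are not taken at the same instant,
/// which is one reason a stat()-based helper is preferable here.
time_t toTimeT(fs::file_time_type tp)
{
    using namespace std::chrono;
    auto sys = time_point_cast<system_clock::duration>(
        tp - fs::file_time_type::clock::now() + system_clock::now());
    return system_clock::to_time_t(sys);
}

int main()
{
    std::cout << toTimeT(fs::last_write_time("/tmp")) << '\n';
}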
@ -555,56 +554,57 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat
|
|||||||
assert(getUUID() == UUIDHelpers::Nil);
|
assert(getUUID() == UUIDHelpers::Nil);
|
||||||
static const char * tmp_drop_ext = ".sql.tmp_drop";
|
static const char * tmp_drop_ext = ".sql.tmp_drop";
|
||||||
const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext));
|
const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext));
|
||||||
if (Poco::File(local_context->getPath() + getDataPath() + '/' + object_name).exists())
|
|
||||||
|
if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name))
|
||||||
{
|
{
|
||||||
Poco::File(getMetadataPath() + file_name).renameTo(getMetadataPath() + object_name + ".sql");
|
fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql");
|
||||||
LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name));
|
LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name));
|
||||||
process_metadata_file(object_name + ".sql");
|
process_metadata_file(object_name + ".sql");
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LOG_INFO(log, "Removing file {}", getMetadataPath() + file_name);
|
LOG_INFO(log, "Removing file {}", getMetadataPath() + file_name);
|
||||||
Poco::File(getMetadataPath() + file_name).remove();
|
fs::remove(getMetadataPath() + file_name);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Metadata files to load: name and flag for .tmp_drop files
|
/// Metadata files to load: name and flag for .tmp_drop files
|
||||||
std::set<std::pair<String, bool>> metadata_files;
|
std::set<std::pair<String, bool>> metadata_files;
|
||||||
|
|
||||||
Poco::DirectoryIterator dir_end;
|
fs::directory_iterator dir_end;
|
||||||
for (Poco::DirectoryIterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it)
|
for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it)
|
||||||
{
|
{
|
||||||
|
String file_name = dir_it->path().filename();
|
||||||
/// For '.svn', '.gitignore' directory and similar.
|
/// For '.svn', '.gitignore' directory and similar.
|
||||||
if (dir_it.name().at(0) == '.')
|
if (file_name.at(0) == '.')
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/// There are .sql.bak files - skip them.
|
/// There are .sql.bak files - skip them.
|
||||||
if (endsWith(dir_it.name(), ".sql.bak"))
|
if (endsWith(file_name, ".sql.bak"))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/// Permanently detached table flag
|
/// Permanently detached table flag
|
||||||
if (endsWith(dir_it.name(), ".sql.detached"))
|
if (endsWith(file_name, ".sql.detached"))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (endsWith(dir_it.name(), ".sql.tmp_drop"))
|
if (endsWith(file_name, ".sql.tmp_drop"))
|
||||||
{
|
{
|
||||||
/// There are files that we tried to delete previously
|
/// There are files that we tried to delete previously
|
||||||
metadata_files.emplace(dir_it.name(), false);
|
metadata_files.emplace(file_name, false);
|
||||||
}
|
}
|
||||||
else if (endsWith(dir_it.name(), ".sql.tmp"))
|
else if (endsWith(file_name, ".sql.tmp"))
|
||||||
{
|
{
|
||||||
/// There are files .sql.tmp - delete
|
/// There are files .sql.tmp - delete
|
||||||
LOG_INFO(log, "Removing file {}", dir_it->path());
|
LOG_INFO(log, "Removing file {}", dir_it->path().string());
|
||||||
Poco::File(dir_it->path()).remove();
|
fs::remove(dir_it->path());
|
||||||
}
|
}
|
||||||
else if (endsWith(dir_it.name(), ".sql"))
|
else if (endsWith(file_name, ".sql"))
|
||||||
{
|
{
|
||||||
/// The required files have names like `table_name.sql`
|
/// The required files have names like `table_name.sql`
|
||||||
metadata_files.emplace(dir_it.name(), true);
|
metadata_files.emplace(file_name, true);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
throw Exception("Incorrect file extension: " + dir_it.name() + " in metadata directory " + getMetadataPath(),
|
throw Exception(ErrorCodes::INCORRECT_FILE_NAME, "Incorrect file extension: {} in metadata directory {}", file_name, getMetadataPath());
|
||||||
ErrorCodes::INCORRECT_FILE_NAME);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read and parse metadata in parallel
|
/// Read and parse metadata in parallel
|
||||||
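The iteration rewrite above follows the usual std::filesystem pattern: a default-constructed fs::directory_iterator is the end iterator, and each element is a directory_entry whose path().filename() replaces Poco's dir_it.name(). A condensed sketch of the same filtering (directory and suffix checks shortened, illustrative only):

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

int main()
{
    const fs::path metadata_dir = "/tmp/metadata";   /// illustrative
    fs::create_directories(metadata_dir);

    fs::directory_iterator dir_end;                  /// default-constructed == end
    for (fs::directory_iterator it(metadata_dir); it != dir_end; ++it)
    {
        const std::string name = it->path().filename();   /// was dir_it.name() with Poco
        if (name.empty() || name.front() == '.')           /// skip ".gitignore" and similar
            continue;
        std::cout << name << '\n';
    }
}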
@ -651,7 +651,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
|
|||||||
{
|
{
|
||||||
if (logger)
|
if (logger)
|
||||||
LOG_ERROR(logger, "File {} is empty. Removing.", metadata_file_path);
|
LOG_ERROR(logger, "File {} is empty. Removing.", metadata_file_path);
|
||||||
Poco::File(metadata_file_path).remove();
|
fs::remove(metadata_file_path);
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -670,8 +670,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
|
|||||||
auto & create = ast->as<ASTCreateQuery &>();
|
auto & create = ast->as<ASTCreateQuery &>();
|
||||||
if (!create.table.empty() && create.uuid != UUIDHelpers::Nil)
|
if (!create.table.empty() && create.uuid != UUIDHelpers::Nil)
|
||||||
{
|
{
|
||||||
String table_name = Poco::Path(metadata_file_path).makeFile().getBaseName();
|
String table_name = unescapeForFileName(fs::path(metadata_file_path).stem());
|
||||||
table_name = unescapeForFileName(table_name);
|
|
||||||
|
|
||||||
if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
|
if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
|
||||||
LOG_WARNING(
|
LOG_WARNING(
|
||||||
|
@ -16,7 +16,6 @@
|
|||||||
#include <Parsers/formatAST.h>
|
#include <Parsers/formatAST.h>
|
||||||
#include <Parsers/parseQuery.h>
|
#include <Parsers/parseQuery.h>
|
||||||
#include <Parsers/queryToString.h>
|
#include <Parsers/queryToString.h>
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
#include <Common/Stopwatch.h>
|
#include <Common/Stopwatch.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool.h>
|
||||||
#include <Common/escapeForFileName.h>
|
#include <Common/escapeForFileName.h>
|
||||||
@ -111,8 +110,7 @@ void DatabaseOrdinary::loadStoredObjects(ContextMutablePtr local_context, bool h
|
|||||||
auto * create_query = ast->as<ASTCreateQuery>();
|
auto * create_query = ast->as<ASTCreateQuery>();
|
||||||
create_query->database = database_name;
|
create_query->database = database_name;
|
||||||
|
|
||||||
auto detached_permanently_flag = Poco::File(full_path.string() + detached_suffix);
|
if (fs::exists(full_path.string() + detached_suffix))
|
||||||
if (detached_permanently_flag.exists())
|
|
||||||
{
|
{
|
||||||
/// FIXME: even if we don't load the table we can still mark the uuid of it as taken.
|
/// FIXME: even if we don't load the table we can still mark the uuid of it as taken.
|
||||||
/// if (create_query->uuid != UUIDHelpers::Nil)
|
/// if (create_query->uuid != UUIDHelpers::Nil)
|
||||||
@ -281,11 +279,11 @@ void DatabaseOrdinary::commitAlterTable(const StorageID &, const String & table_
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
/// rename atomically replaces the old file with the new one.
|
/// rename atomically replaces the old file with the new one.
|
||||||
Poco::File(table_metadata_tmp_path).renameTo(table_metadata_path);
|
fs::rename(table_metadata_tmp_path, table_metadata_path);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
Poco::File(table_metadata_tmp_path).remove();
|
fs::remove(table_metadata_tmp_path);
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -212,7 +212,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(bool force_attach)
|
|||||||
createDatabaseNodesInZooKeeper(current_zookeeper);
|
createDatabaseNodesInZooKeeper(current_zookeeper);
|
||||||
}
|
}
|
||||||
|
|
||||||
replica_path = zookeeper_path + "/replicas/" + getFullReplicaName();
|
replica_path = fs::path(zookeeper_path) / "replicas" / getFullReplicaName();
|
||||||
|
|
||||||
String replica_host_id;
|
String replica_host_id;
|
||||||
if (current_zookeeper->tryGet(replica_path, replica_host_id))
|
if (current_zookeeper->tryGet(replica_path, replica_host_id))
|
||||||
|
@ -1,6 +1,9 @@
|
|||||||
#include <Databases/DatabaseReplicatedWorker.h>
|
#include <Databases/DatabaseReplicatedWorker.h>
|
||||||
#include <Databases/DatabaseReplicated.h>
|
#include <Databases/DatabaseReplicated.h>
|
||||||
#include <Interpreters/DDLTask.h>
|
#include <Interpreters/DDLTask.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -156,7 +159,7 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
UInt32 our_log_ptr = parse<UInt32>(current_zookeeper->get(database->replica_path + "/log_ptr"));
|
UInt32 our_log_ptr = parse<UInt32>(current_zookeeper->get(fs::path(database->replica_path) / "log_ptr"));
|
||||||
UInt32 entry_num = DatabaseReplicatedTask::getLogEntryNumber(entry_name);
|
UInt32 entry_num = DatabaseReplicatedTask::getLogEntryNumber(entry_name);
|
||||||
|
|
||||||
if (entry_num <= our_log_ptr)
|
if (entry_num <= our_log_ptr)
|
||||||
@ -165,13 +168,13 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
String entry_path = queue_dir + "/" + entry_name;
|
String entry_path = fs::path(queue_dir) / entry_name;
|
||||||
auto task = std::make_unique<DatabaseReplicatedTask>(entry_name, entry_path, database);
|
auto task = std::make_unique<DatabaseReplicatedTask>(entry_name, entry_path, database);
|
||||||
|
|
||||||
String initiator_name;
|
String initiator_name;
|
||||||
zkutil::EventPtr wait_committed_or_failed = std::make_shared<Poco::Event>();
|
zkutil::EventPtr wait_committed_or_failed = std::make_shared<Poco::Event>();
|
||||||
|
|
||||||
String try_node_path = entry_path + "/try";
|
String try_node_path = fs::path(entry_path) / "try";
|
||||||
if (zookeeper->tryGet(try_node_path, initiator_name, nullptr, wait_committed_or_failed))
|
if (zookeeper->tryGet(try_node_path, initiator_name, nullptr, wait_committed_or_failed))
|
||||||
{
|
{
|
||||||
task->is_initial_query = initiator_name == task->host_id_str;
|
task->is_initial_query = initiator_name == task->host_id_str;
|
||||||
@ -203,7 +206,7 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
|
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
|
||||||
throw Coordination::Exception(code, try_node_path);
|
throw Coordination::Exception(code, try_node_path);
|
||||||
|
|
||||||
if (!zookeeper->exists(entry_path + "/committed"))
|
if (!zookeeper->exists(fs::path(entry_path) / "committed"))
|
||||||
{
|
{
|
||||||
out_reason = fmt::format("Entry {} was forcefully cancelled due to timeout", entry_name);
|
out_reason = fmt::format("Entry {} was forcefully cancelled due to timeout", entry_name);
|
||||||
return {};
|
return {};
|
||||||
@ -212,7 +215,7 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!zookeeper->exists(entry_path + "/committed"))
|
if (!zookeeper->exists(fs::path(entry_path) / "committed"))
|
||||||
{
|
{
|
||||||
out_reason = fmt::format("Entry {} hasn't been committed", entry_name);
|
out_reason = fmt::format("Entry {} hasn't been committed", entry_name);
|
||||||
return {};
|
return {};
|
||||||
@ -220,8 +223,8 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
|
|
||||||
if (task->is_initial_query)
|
if (task->is_initial_query)
|
||||||
{
|
{
|
||||||
assert(!zookeeper->exists(entry_path + "/try"));
|
assert(!zookeeper->exists(fs::path(entry_path) / "try"));
|
||||||
assert(zookeeper->exists(entry_path + "/committed") == (zookeeper->get(task->getFinishedNodePath()) == ExecutionStatus(0).serializeText()));
|
assert(zookeeper->exists(fs::path(entry_path) / "committed") == (zookeeper->get(task->getFinishedNodePath()) == ExecutionStatus(0).serializeText()));
|
||||||
out_reason = fmt::format("Entry {} has been executed as initial query", entry_name);
|
out_reason = fmt::format("Entry {} has been executed as initial query", entry_name);
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -257,7 +260,7 @@ DDLTaskPtr DatabaseReplicatedDDLWorker::initAndCheckTask(const String & entry_na
|
|||||||
bool DatabaseReplicatedDDLWorker::canRemoveQueueEntry(const String & entry_name, const Coordination::Stat &)
|
bool DatabaseReplicatedDDLWorker::canRemoveQueueEntry(const String & entry_name, const Coordination::Stat &)
|
||||||
{
|
{
|
||||||
UInt32 entry_number = DDLTaskBase::getLogEntryNumber(entry_name);
|
UInt32 entry_number = DDLTaskBase::getLogEntryNumber(entry_name);
|
||||||
UInt32 max_log_ptr = parse<UInt32>(getAndSetZooKeeper()->get(database->zookeeper_path + "/max_log_ptr"));
|
UInt32 max_log_ptr = parse<UInt32>(getAndSetZooKeeper()->get(fs::path(database->zookeeper_path) / "max_log_ptr"));
|
||||||
return entry_number + logs_to_keep < max_log_ptr;
|
return entry_number + logs_to_keep < max_log_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,10 +24,10 @@
|
|||||||
# include <Common/escapeForFileName.h>
|
# include <Common/escapeForFileName.h>
|
||||||
# include <Common/parseAddress.h>
|
# include <Common/parseAddress.h>
|
||||||
# include <Common/setThreadName.h>
|
# include <Common/setThreadName.h>
|
||||||
|
# include <filesystem>
|
||||||
|
# include <Common/filesystemHelpers.h>
|
||||||
|
|
||||||
# include <Poco/DirectoryIterator.h>
|
namespace fs = std::filesystem;
|
||||||
# include <Poco/File.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -326,7 +326,7 @@ void DatabaseConnectionMySQL::shutdown()
|
|||||||
|
|
||||||
void DatabaseConnectionMySQL::drop(ContextPtr /*context*/)
|
void DatabaseConnectionMySQL::drop(ContextPtr /*context*/)
|
||||||
{
|
{
|
||||||
Poco::File(getMetadataPath()).remove(true);
|
fs::remove_all(getMetadataPath());
|
||||||
}
|
}
|
||||||
|
|
||||||
void DatabaseConnectionMySQL::cleanOutdatedTables()
|
void DatabaseConnectionMySQL::cleanOutdatedTables()
|
||||||
@ -372,10 +372,10 @@ void DatabaseConnectionMySQL::attachTable(const String & table_name, const Stora
|
|||||||
local_tables_cache[table_name].second = storage;
|
local_tables_cache[table_name].second = storage;
|
||||||
|
|
||||||
remove_or_detach_tables.erase(table_name);
|
remove_or_detach_tables.erase(table_name);
|
||||||
Poco::File remove_flag(getMetadataPath() + '/' + escapeForFileName(table_name) + suffix);
|
fs::path remove_flag = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
|
||||||
|
|
||||||
if (remove_flag.exists())
|
if (fs::exists(remove_flag))
|
||||||
remove_flag.remove();
|
fs::remove(remove_flag);
|
||||||
}
|
}
|
||||||
|
|
||||||
StoragePtr DatabaseConnectionMySQL::detachTable(const String & table_name)
|
StoragePtr DatabaseConnectionMySQL::detachTable(const String & table_name)
|
||||||
@ -403,13 +403,13 @@ void DatabaseConnectionMySQL::loadStoredObjects(ContextMutablePtr, bool, bool /*
|
|||||||
{
|
{
|
||||||
|
|
||||||
std::lock_guard<std::mutex> lock{mutex};
|
std::lock_guard<std::mutex> lock{mutex};
|
||||||
Poco::DirectoryIterator iterator(getMetadataPath());
|
fs::directory_iterator iter(getMetadataPath());
|
||||||
|
|
||||||
for (Poco::DirectoryIterator end; iterator != end; ++iterator)
|
for (fs::directory_iterator end; iter != end; ++iter)
|
||||||
{
|
{
|
||||||
if (iterator->isFile() && endsWith(iterator.name(), suffix))
|
if (fs::is_regular_file(iter->path()) && endsWith(iter->path().filename(), suffix))
|
||||||
{
|
{
|
||||||
const auto & filename = iterator.name();
|
const auto & filename = iter->path().filename().string();
|
||||||
const auto & table_name = unescapeForFileName(filename.substr(0, filename.size() - strlen(suffix)));
|
const auto & table_name = unescapeForFileName(filename.substr(0, filename.size() - strlen(suffix)));
|
||||||
remove_or_detach_tables.emplace(table_name);
|
remove_or_detach_tables.emplace(table_name);
|
||||||
}
|
}
|
||||||
@ -420,27 +420,25 @@ void DatabaseConnectionMySQL::detachTablePermanently(ContextPtr, const String &
|
|||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock{mutex};
|
std::lock_guard<std::mutex> lock{mutex};
|
||||||
|
|
||||||
Poco::File remove_flag(getMetadataPath() + '/' + escapeForFileName(table_name) + suffix);
|
fs::path remove_flag = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
|
||||||
|
|
||||||
if (remove_or_detach_tables.count(table_name))
|
if (remove_or_detach_tables.count(table_name))
|
||||||
throw Exception("Table " + backQuoteIfNeed(database_name) + "." + backQuoteIfNeed(table_name) + " is dropped",
|
throw Exception(ErrorCodes::TABLE_IS_DROPPED, "Table {}.{} is dropped", backQuoteIfNeed(database_name), backQuoteIfNeed(table_name));
|
||||||
ErrorCodes::TABLE_IS_DROPPED);
|
|
||||||
|
|
||||||
if (remove_flag.exists())
|
if (fs::exists(remove_flag))
|
||||||
throw Exception("The remove flag file already exists but the " + backQuoteIfNeed(database_name) +
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "The remove flag file already exists but the table {}.{} does not exist; this is a bug.",
|
||||||
"." + backQuoteIfNeed(table_name) + " does not exists remove tables, it is bug.", ErrorCodes::LOGICAL_ERROR);
|
backQuoteIfNeed(database_name), backQuoteIfNeed(table_name));
|
||||||
|
|
||||||
auto table_iter = local_tables_cache.find(table_name);
|
auto table_iter = local_tables_cache.find(table_name);
|
||||||
if (table_iter == local_tables_cache.end())
|
if (table_iter == local_tables_cache.end())
|
||||||
throw Exception("Table " + backQuoteIfNeed(database_name) + "." + backQuoteIfNeed(table_name) + " doesn't exist.",
|
throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuoteIfNeed(database_name), backQuoteIfNeed(table_name));
|
||||||
ErrorCodes::UNKNOWN_TABLE);
|
|
||||||
|
|
||||||
remove_or_detach_tables.emplace(table_name);
|
remove_or_detach_tables.emplace(table_name);
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
table_iter->second.second->drop();
|
table_iter->second.second->drop();
|
||||||
remove_flag.createFile();
|
FS::createFile(remove_flag);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
|
@ -13,9 +13,11 @@
|
|||||||
# include <Databases/MySQL/MaterializeMySQLSyncThread.h>
|
# include <Databases/MySQL/MaterializeMySQLSyncThread.h>
|
||||||
# include <Parsers/ASTCreateQuery.h>
|
# include <Parsers/ASTCreateQuery.h>
|
||||||
# include <Storages/StorageMaterializeMySQL.h>
|
# include <Storages/StorageMaterializeMySQL.h>
|
||||||
# include <Poco/File.h>
|
|
||||||
# include <Poco/Logger.h>
|
# include <Poco/Logger.h>
|
||||||
# include <Common/setThreadName.h>
|
# include <Common/setThreadName.h>
|
||||||
|
# include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -158,10 +160,10 @@ template<typename Base>
|
|||||||
void DatabaseMaterializeMySQL<Base>::drop(ContextPtr context_)
|
void DatabaseMaterializeMySQL<Base>::drop(ContextPtr context_)
|
||||||
{
|
{
|
||||||
/// Remove metadata info
|
/// Remove metadata info
|
||||||
Poco::File metadata(Base::getMetadataPath() + "/.metadata");
|
fs::path metadata(Base::getMetadataPath() + "/.metadata");
|
||||||
|
|
||||||
if (metadata.exists())
|
if (fs::exists(metadata))
|
||||||
metadata.remove(false);
|
fs::remove(metadata);
|
||||||
|
|
||||||
Base::drop(context_);
|
Base::drop(context_);
|
||||||
}
|
}
|
||||||
|
@ -8,11 +8,13 @@
|
|||||||
#include <Formats/MySQLBlockInputStream.h>
|
#include <Formats/MySQLBlockInputStream.h>
|
||||||
#include <IO/ReadBufferFromFile.h>
|
#include <IO/ReadBufferFromFile.h>
|
||||||
#include <IO/WriteBufferFromFile.h>
|
#include <IO/WriteBufferFromFile.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <IO/Operators.h>
|
#include <IO/Operators.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -193,12 +195,11 @@ void commitMetadata(const std::function<void()> & function, const String & persi
|
|||||||
try
|
try
|
||||||
{
|
{
|
||||||
function();
|
function();
|
||||||
|
fs::rename(persistent_tmp_path, persistent_path);
|
||||||
Poco::File(persistent_tmp_path).renameTo(persistent_path);
|
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
Poco::File(persistent_tmp_path).remove();
|
fs::remove(persistent_tmp_path);
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -231,7 +232,7 @@ void MaterializeMetadata::transaction(const MySQLReplication::Position & positio
|
|||||||
|
|
||||||
MaterializeMetadata::MaterializeMetadata(const String & path_, const Settings & settings_) : persistent_path(path_), settings(settings_)
|
MaterializeMetadata::MaterializeMetadata(const String & path_, const Settings & settings_) : persistent_path(path_), settings(settings_)
|
||||||
{
|
{
|
||||||
if (Poco::File(persistent_path).exists())
|
if (fs::exists(persistent_path))
|
||||||
{
|
{
|
||||||
ReadBufferFromFile in(persistent_path, DBMS_DEFAULT_BUFFER_SIZE);
|
ReadBufferFromFile in(persistent_path, DBMS_DEFAULT_BUFFER_SIZE);
|
||||||
assertString("Version:\t" + toString(meta_version), in);
|
assertString("Version:\t" + toString(meta_version), in);
|
||||||
|
@ -12,11 +12,12 @@
|
|||||||
#include <Parsers/parseQuery.h>
|
#include <Parsers/parseQuery.h>
|
||||||
#include <Parsers/queryToString.h>
|
#include <Parsers/queryToString.h>
|
||||||
#include <Common/escapeForFileName.h>
|
#include <Common/escapeForFileName.h>
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
|
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
|
#include <Common/filesystemHelpers.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -209,9 +210,9 @@ void DatabasePostgreSQL::attachTable(const String & table_name, const StoragePtr
|
|||||||
|
|
||||||
detached_or_dropped.erase(table_name);
|
detached_or_dropped.erase(table_name);
|
||||||
|
|
||||||
Poco::File table_marked_as_removed(getMetadataPath() + '/' + escapeForFileName(table_name) + suffix);
|
fs::path table_marked_as_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
|
||||||
if (table_marked_as_removed.exists())
|
if (fs::exists(table_marked_as_removed))
|
||||||
table_marked_as_removed.remove();
|
fs::remove(table_marked_as_removed);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -256,16 +257,8 @@ void DatabasePostgreSQL::dropTable(ContextPtr, const String & table_name, bool /
|
|||||||
if (detached_or_dropped.count(table_name))
|
if (detached_or_dropped.count(table_name))
|
||||||
throw Exception(fmt::format("Table {}.{} is already dropped/detached", database_name, table_name), ErrorCodes::TABLE_IS_DROPPED);
|
throw Exception(fmt::format("Table {}.{} is already dropped/detached", database_name, table_name), ErrorCodes::TABLE_IS_DROPPED);
|
||||||
|
|
||||||
Poco::File mark_table_removed(getMetadataPath() + '/' + escapeForFileName(table_name) + suffix);
|
fs::path mark_table_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
|
||||||
|
FS::createFile(mark_table_removed);
|
||||||
try
|
|
||||||
{
|
|
||||||
mark_table_removed.createFile();
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cache_tables)
|
if (cache_tables)
|
||||||
cached_tables.erase(table_name);
|
cached_tables.erase(table_name);
|
||||||
@ -276,7 +269,7 @@ void DatabasePostgreSQL::dropTable(ContextPtr, const String & table_name, bool /
|
|||||||
|
|
||||||
void DatabasePostgreSQL::drop(ContextPtr /*context*/)
|
void DatabasePostgreSQL::drop(ContextPtr /*context*/)
|
||||||
{
|
{
|
||||||
Poco::File(getMetadataPath()).remove(true);
|
fs::remove_all(getMetadataPath());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -284,14 +277,14 @@ void DatabasePostgreSQL::loadStoredObjects(ContextMutablePtr /* context */, bool
|
|||||||
{
|
{
|
||||||
{
|
{
|
||||||
std::lock_guard<std::mutex> lock{mutex};
|
std::lock_guard<std::mutex> lock{mutex};
|
||||||
Poco::DirectoryIterator iterator(getMetadataPath());
|
fs::directory_iterator iter(getMetadataPath());
|
||||||
|
|
||||||
/// Check for previously dropped tables
|
/// Check for previously dropped tables
|
||||||
for (Poco::DirectoryIterator end; iterator != end; ++iterator)
|
for (fs::directory_iterator end; iter != end; ++iter)
|
||||||
{
|
{
|
||||||
if (iterator->isFile() && endsWith(iterator.name(), suffix))
|
if (fs::is_regular_file(iter->path()) && endsWith(iter->path().filename(), suffix))
|
||||||
{
|
{
|
||||||
const auto & file_name = iterator.name();
|
const auto & file_name = iter->path().filename().string();
|
||||||
const auto & table_name = unescapeForFileName(file_name.substr(0, file_name.size() - strlen(suffix)));
|
const auto & table_name = unescapeForFileName(file_name.substr(0, file_name.size() - strlen(suffix)));
|
||||||
detached_or_dropped.emplace(table_name);
|
detached_or_dropped.emplace(table_name);
|
||||||
}
|
}
|
||||||
@ -325,9 +318,9 @@ void DatabasePostgreSQL::removeOutdatedTables()
|
|||||||
{
|
{
|
||||||
auto table_name = *iter;
|
auto table_name = *iter;
|
||||||
iter = detached_or_dropped.erase(iter);
|
iter = detached_or_dropped.erase(iter);
|
||||||
Poco::File table_marked_as_removed(getMetadataPath() + '/' + escapeForFileName(table_name) + suffix);
|
fs::path table_marked_as_removed = fs::path(getMetadataPath()) / (escapeForFileName(table_name) + suffix);
|
||||||
if (table_marked_as_removed.exists())
|
if (fs::exists(table_marked_as_removed))
|
||||||
table_marked_as_removed.remove();
|
fs::remove(table_marked_as_removed);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
++iter;
|
++iter;
|
||||||
|
@ -3,14 +3,13 @@
|
|||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include <common/types.h>
|
#include <common/types.h>
|
||||||
|
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Util/AbstractConfiguration.h>
|
#include <Poco/Util/AbstractConfiguration.h>
|
||||||
#include <DataStreams/IBlockInputStream.h>
|
#include <DataStreams/IBlockInputStream.h>
|
||||||
#include <Columns/IColumn.h>
|
#include <Columns/IColumn.h>
|
||||||
#include <Core/Block.h>
|
#include <Core/Block.h>
|
||||||
#include <Interpreters/Context_fwd.h>
|
#include <Interpreters/Context_fwd.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
@@ -5,7 +5,9 @@
 #include <Poco/Exception.h>
 #include <Poco/Util/Application.h>
 #include "HierarchyFormatReader.h"
+#include <filesystem>
 
+namespace fs = std::filesystem;
 
 bool RegionsHierarchyDataSource::isModified() const
 {
@@ -27,14 +29,13 @@ RegionsHierarchiesDataProvider::RegionsHierarchiesDataProvider(const std::string
 
 void RegionsHierarchiesDataProvider::discoverFilesWithCustomHierarchies()
 {
-    std::string basename = Poco::Path(path).getBaseName();
+    std::string basename = fs::path(path).stem();
+    fs::path dir_path = fs::canonical(path).parent_path();
 
-    Poco::Path dir_path = Poco::Path(path).absolute().parent();
-    Poco::DirectoryIterator dir_end;
-    for (Poco::DirectoryIterator dir_it(dir_path); dir_it != dir_end; ++dir_it)
+    fs::directory_iterator dir_end;
+    for (fs::directory_iterator dir_it(dir_path); dir_it != dir_end; ++dir_it)
     {
-        std::string candidate_basename = dir_it.path().getBaseName();
+        std::string candidate_basename = dir_it->path().stem();
 
         if (candidate_basename.starts_with(basename)
             && (candidate_basename.size() > basename.size() + 1)
@@ -2,7 +2,9 @@
 
 #include <IO/ReadBufferFromFile.h>
 #include "NamesFormatReader.h"
+#include <filesystem>
 
+namespace fs = std::filesystem;
 
 bool LanguageRegionsNamesDataSource::isModified() const
 {
@@ -11,7 +13,7 @@ bool LanguageRegionsNamesDataSource::isModified() const
 
 size_t LanguageRegionsNamesDataSource::estimateTotalSize() const
 {
-    return Poco::File(path).getSize();
+    return fs::file_size(path);
 }
 
 ILanguageRegionsNamesReaderPtr LanguageRegionsNamesDataSource::createReader()
@@ -39,7 +41,7 @@ RegionsNamesDataProvider::RegionsNamesDataProvider(const std::string & directory
 ILanguageRegionsNamesDataSourcePtr RegionsNamesDataProvider::getLanguageRegionsNamesSource(const std::string & language) const
 {
     const auto data_file = getDataFilePath(language);
-    if (Poco::File(data_file).exists())
+    if (fs::exists(data_file))
         return std::make_unique<LanguageRegionsNamesDataSource>(data_file, language);
     else
         return {};
@@ -1,9 +1,4 @@
 #include "FileDictionarySource.h"
-
-#include <filesystem>
-
-#include <Poco/File.h>
-
 #include <common/logger_useful.h>
 #include <Common/StringUtils/StringUtils.h>
 #include <Common/filesystemHelpers.h>
@@ -15,6 +10,7 @@
 #include "registerDictionaries.h"
 #include "DictionarySourceHelpers.h"
 
+
 namespace DB
 {
 static const UInt64 max_block_size = 8192;
@@ -68,9 +64,10 @@ std::string FileDictionarySource::toString() const
 
 Poco::Timestamp FileDictionarySource::getLastModification() const
 {
-    return Poco::File{filepath}.getLastModified();
+    return FS::getModificationTimestamp(filepath);
 }
 
+
 void registerDictionarySourceFile(DictionarySourceFactory & factory)
 {
     auto create_table_source = [=](const DictionaryStructure & dict_struct,
@@ -1,19 +1,19 @@
 #include "LibraryDictionarySource.h"
 
-#include <Poco/File.h>
+#include <DataStreams/OneBlockInputStream.h>
+#include <Interpreters/Context.h>
 #include <common/logger_useful.h>
 #include <Common/filesystemHelpers.h>
 #include <IO/WriteBufferFromString.h>
 #include <IO/WriteHelpers.h>
-#include <DataStreams/OneBlockInputStream.h>
-#include <Interpreters/Context.h>
+#include <filesystem>
 
 #include <Dictionaries/DictionarySourceFactory.h>
 #include <Dictionaries/DictionarySourceHelpers.h>
 #include <Dictionaries/DictionaryStructure.h>
 #include <Dictionaries/registerDictionaries.h>
 
+namespace fs = std::filesystem;
 
 namespace DB
 {
@@ -44,8 +44,8 @@ LibraryDictionarySource::LibraryDictionarySource(
     if (created_from_ddl && !pathStartsWith(path, context->getDictionariesLibPath()))
         throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "File path {} is not inside {}", path, context->getDictionariesLibPath());
 
-    if (!Poco::File(path).exists())
-        throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "LibraryDictionarySource: Can't load library {}: file doesn't exist", Poco::File(path).path());
+    if (!fs::exists(path))
+        throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "LibraryDictionarySource: Can't load library {}: file doesn't exist", path);
 
     description.init(sample_block);
     bridge_helper = std::make_shared<LibraryBridgeHelper>(context, description.sample_block, dictionary_id);
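The DiskLocal and IDiskRemote hunks that follow replace string concatenation such as disk_path + path with fs::path(disk_path) / path. One behavioural difference of operator/ is worth keeping in mind: it discards the left-hand side when the right-hand side is absolute, so the relative-path convention of these disk APIs matters. A tiny standalone check (illustrative only; paths are made up, POSIX semantics assumed):

#include <cassert>
#include <filesystem>

namespace fs = std::filesystem;

int main()
{
    fs::path disk_path = "/var/lib/clickhouse/";   // hypothetical disk root

    // Behaves like the old "disk_path + path" for relative paths.
    assert((disk_path / "store/abc/part_1").string() == "/var/lib/clickhouse/store/abc/part_1");

    // Unlike string concatenation, an absolute right-hand side replaces the left-hand side entirely.
    assert((disk_path / "/etc/passwd").string() == "/etc/passwd");
}
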
@ -6,9 +6,9 @@
|
|||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Common/filesystemHelpers.h>
|
#include <Common/filesystemHelpers.h>
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
|
|
||||||
#include <IO/createReadBufferFromFileBase.h>
|
#include <IO/createReadBufferFromFileBase.h>
|
||||||
|
|
||||||
|
#include <fstream>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
|
||||||
|
|
||||||
@ -60,27 +60,28 @@ class DiskLocalDirectoryIterator : public IDiskDirectoryIterator
|
|||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
explicit DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_)
|
explicit DiskLocalDirectoryIterator(const String & disk_path_, const String & dir_path_)
|
||||||
: dir_path(dir_path_), iter(disk_path_ + dir_path_)
|
: dir_path(dir_path_), entry(fs::path(disk_path_) / dir_path_)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
void next() override { ++iter; }
|
void next() override { ++entry; }
|
||||||
|
|
||||||
bool isValid() const override { return iter != Poco::DirectoryIterator(); }
|
bool isValid() const override { return entry != fs::directory_iterator(); }
|
||||||
|
|
||||||
String path() const override
|
String path() const override
|
||||||
{
|
{
|
||||||
if (iter->isDirectory())
|
if (entry->is_directory())
|
||||||
return dir_path + iter.name() + '/';
|
return dir_path / entry->path().filename() / "";
|
||||||
else
|
else
|
||||||
return dir_path + iter.name();
|
return dir_path / entry->path().filename();
|
||||||
}
|
}
|
||||||
|
|
||||||
String name() const override { return iter.name(); }
|
|
||||||
|
String name() const override { return entry->path().filename(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
String dir_path;
|
fs::path dir_path;
|
||||||
Poco::DirectoryIterator iter;
|
fs::directory_iterator entry;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -118,7 +119,7 @@ UInt64 DiskLocal::getTotalSpace() const
|
|||||||
{
|
{
|
||||||
struct statvfs fs;
|
struct statvfs fs;
|
||||||
if (name == "default") /// for default disk we get space from path/data/
|
if (name == "default") /// for default disk we get space from path/data/
|
||||||
fs = getStatVFS(disk_path + "data/");
|
fs = getStatVFS((fs::path(disk_path) / "data/").string());
|
||||||
else
|
else
|
||||||
fs = getStatVFS(disk_path);
|
fs = getStatVFS(disk_path);
|
||||||
UInt64 total_size = fs.f_blocks * fs.f_bsize;
|
UInt64 total_size = fs.f_blocks * fs.f_bsize;
|
||||||
@ -133,7 +134,7 @@ UInt64 DiskLocal::getAvailableSpace() const
|
|||||||
/// available for superuser only and for system purposes
|
/// available for superuser only and for system purposes
|
||||||
struct statvfs fs;
|
struct statvfs fs;
|
||||||
if (name == "default") /// for default disk we get space from path/data/
|
if (name == "default") /// for default disk we get space from path/data/
|
||||||
fs = getStatVFS(disk_path + "data/");
|
fs = getStatVFS((fs::path(disk_path) / "data/").string());
|
||||||
else
|
else
|
||||||
fs = getStatVFS(disk_path);
|
fs = getStatVFS(disk_path);
|
||||||
UInt64 total_size = fs.f_bavail * fs.f_bsize;
|
UInt64 total_size = fs.f_bavail * fs.f_bsize;
|
||||||
@ -152,45 +153,43 @@ UInt64 DiskLocal::getUnreservedSpace() const
|
|||||||
|
|
||||||
bool DiskLocal::exists(const String & path) const
|
bool DiskLocal::exists(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(disk_path + path).exists();
|
return fs::exists(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool DiskLocal::isFile(const String & path) const
|
bool DiskLocal::isFile(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(disk_path + path).isFile();
|
return fs::is_regular_file(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool DiskLocal::isDirectory(const String & path) const
|
bool DiskLocal::isDirectory(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(disk_path + path).isDirectory();
|
return fs::is_directory(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t DiskLocal::getFileSize(const String & path) const
|
size_t DiskLocal::getFileSize(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(disk_path + path).getSize();
|
return fs::file_size(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::createDirectory(const String & path)
|
void DiskLocal::createDirectory(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).createDirectory();
|
fs::create_directory(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::createDirectories(const String & path)
|
void DiskLocal::createDirectories(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).createDirectories();
|
fs::create_directories(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::clearDirectory(const String & path)
|
void DiskLocal::clearDirectory(const String & path)
|
||||||
{
|
{
|
||||||
std::vector<Poco::File> files;
|
for (const auto & entry : fs::directory_iterator(fs::path(disk_path) / path))
|
||||||
Poco::File(disk_path + path).list(files);
|
fs::remove(entry.path());
|
||||||
for (auto & file : files)
|
|
||||||
file.remove();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::moveDirectory(const String & from_path, const String & to_path)
|
void DiskLocal::moveDirectory(const String & from_path, const String & to_path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + from_path).renameTo(disk_path + to_path);
|
fs::rename(fs::path(disk_path) / from_path, fs::path(disk_path) / to_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
DiskDirectoryIteratorPtr DiskLocal::iterateDirectory(const String & path)
|
DiskDirectoryIteratorPtr DiskLocal::iterateDirectory(const String & path)
|
||||||
@ -200,99 +199,95 @@ DiskDirectoryIteratorPtr DiskLocal::iterateDirectory(const String & path)
|
|||||||
|
|
||||||
void DiskLocal::moveFile(const String & from_path, const String & to_path)
|
void DiskLocal::moveFile(const String & from_path, const String & to_path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + from_path).renameTo(disk_path + to_path);
|
fs::rename(fs::path(disk_path) / from_path, fs::path(disk_path) / to_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::replaceFile(const String & from_path, const String & to_path)
|
void DiskLocal::replaceFile(const String & from_path, const String & to_path)
|
||||||
{
|
{
|
||||||
Poco::File from_file(disk_path + from_path);
|
fs::path from_file = fs::path(disk_path) / from_path;
|
||||||
Poco::File to_file(disk_path + to_path);
|
fs::path to_file = fs::path(disk_path) / to_path;
|
||||||
if (to_file.exists())
|
fs::rename(from_file, to_file);
|
||||||
{
|
|
||||||
Poco::File tmp_file(disk_path + to_path + ".old");
|
|
||||||
to_file.renameTo(tmp_file.path());
|
|
||||||
from_file.renameTo(disk_path + to_path);
|
|
||||||
tmp_file.remove();
|
|
||||||
}
|
|
||||||
else
|
|
||||||
from_file.renameTo(to_file.path());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ReadBufferFromFileBase>
|
std::unique_ptr<ReadBufferFromFileBase>
|
||||||
DiskLocal::readFile(
|
DiskLocal::readFile(
|
||||||
const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const
|
const String & path, size_t buf_size, size_t estimated_size, size_t aio_threshold, size_t mmap_threshold, MMappedFileCache * mmap_cache) const
|
||||||
{
|
{
|
||||||
return createReadBufferFromFileBase(disk_path + path, estimated_size, aio_threshold, mmap_threshold, mmap_cache, buf_size);
|
return createReadBufferFromFileBase(fs::path(disk_path) / path, estimated_size, aio_threshold, mmap_threshold, mmap_cache, buf_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<WriteBufferFromFileBase>
|
std::unique_ptr<WriteBufferFromFileBase>
|
||||||
DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode)
|
DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode)
|
||||||
{
|
{
|
||||||
int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1;
|
int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1;
|
||||||
return std::make_unique<WriteBufferFromFile>(disk_path + path, buf_size, flags);
|
return std::make_unique<WriteBufferFromFile>(fs::path(disk_path) / path, buf_size, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::removeFile(const String & path)
|
void DiskLocal::removeFile(const String & path)
|
||||||
{
|
{
|
||||||
auto fs_path = disk_path + path;
|
auto fs_path = fs::path(disk_path) / path;
|
||||||
if (0 != unlink(fs_path.c_str()))
|
if (0 != unlink(fs_path.c_str()))
|
||||||
throwFromErrnoWithPath("Cannot unlink file " + fs_path, fs_path, ErrorCodes::CANNOT_UNLINK);
|
throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::removeFileIfExists(const String & path)
|
void DiskLocal::removeFileIfExists(const String & path)
|
||||||
{
|
{
|
||||||
auto fs_path = disk_path + path;
|
auto fs_path = fs::path(disk_path) / path;
|
||||||
if (0 != unlink(fs_path.c_str()) && errno != ENOENT)
|
if (0 != unlink(fs_path.c_str()) && errno != ENOENT)
|
||||||
throwFromErrnoWithPath("Cannot unlink file " + fs_path, fs_path, ErrorCodes::CANNOT_UNLINK);
|
throwFromErrnoWithPath("Cannot unlink file " + fs_path.string(), fs_path, ErrorCodes::CANNOT_UNLINK);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::removeDirectory(const String & path)
|
void DiskLocal::removeDirectory(const String & path)
|
||||||
{
|
{
|
||||||
auto fs_path = disk_path + path;
|
auto fs_path = fs::path(disk_path) / path;
|
||||||
if (0 != rmdir(fs_path.c_str()))
|
if (0 != rmdir(fs_path.c_str()))
|
||||||
throwFromErrnoWithPath("Cannot rmdir " + fs_path, fs_path, ErrorCodes::CANNOT_RMDIR);
|
throwFromErrnoWithPath("Cannot rmdir " + fs_path.string(), fs_path, ErrorCodes::CANNOT_RMDIR);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::removeRecursive(const String & path)
|
void DiskLocal::removeRecursive(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).remove(true);
|
fs::remove_all(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::listFiles(const String & path, std::vector<String> & file_names)
|
void DiskLocal::listFiles(const String & path, std::vector<String> & file_names)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).list(file_names);
|
file_names.clear();
|
||||||
|
for (const auto & entry : fs::directory_iterator(fs::path(disk_path) / path))
|
||||||
|
file_names.emplace_back(entry.path().filename());
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::setLastModified(const String & path, const Poco::Timestamp & timestamp)
|
void DiskLocal::setLastModified(const String & path, const Poco::Timestamp & timestamp)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).setLastModified(timestamp);
|
FS::setModificationTime(fs::path(disk_path) / path, timestamp.epochTime());
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::Timestamp DiskLocal::getLastModified(const String & path)
|
Poco::Timestamp DiskLocal::getLastModified(const String & path)
|
||||||
{
|
{
|
||||||
return Poco::File(disk_path + path).getLastModified();
|
return FS::getModificationTimestamp(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::createHardLink(const String & src_path, const String & dst_path)
|
void DiskLocal::createHardLink(const String & src_path, const String & dst_path)
|
||||||
{
|
{
|
||||||
DB::createHardLink(disk_path + src_path, disk_path + dst_path);
|
DB::createHardLink(fs::path(disk_path) / src_path, fs::path(disk_path) / dst_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::truncateFile(const String & path, size_t size)
|
void DiskLocal::truncateFile(const String & path, size_t size)
|
||||||
{
|
{
|
||||||
int res = truncate((disk_path + path).c_str(), size);
|
int res = truncate((fs::path(disk_path) / path).string().data(), size);
|
||||||
if (-1 == res)
|
if (-1 == res)
|
||||||
throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
|
throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::createFile(const String & path)
|
void DiskLocal::createFile(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).createFile();
|
FS::createFile(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskLocal::setReadOnly(const String & path)
|
void DiskLocal::setReadOnly(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).setReadOnly(true);
|
fs::permissions(fs::path(disk_path) / path,
|
||||||
|
fs::perms::owner_write | fs::perms::group_write | fs::perms::others_write,
|
||||||
|
fs::perm_options::remove);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool inline isSameDiskType(const IDisk & one, const IDisk & another)
|
bool inline isSameDiskType(const IDisk & one, const IDisk & another)
|
||||||
@ -303,14 +298,23 @@ bool inline isSameDiskType(const IDisk & one, const IDisk & another)
|
|||||||
void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
|
void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
|
||||||
{
|
{
|
||||||
if (isSameDiskType(*this, *to_disk))
|
if (isSameDiskType(*this, *to_disk))
|
||||||
Poco::File(disk_path + from_path).copyTo(to_disk->getPath() + to_path); /// Use more optimal way.
|
{
|
||||||
|
fs::path to = fs::path(to_disk->getPath()) / to_path;
|
||||||
|
fs::path from = fs::path(disk_path) / from_path;
|
||||||
|
if (from_path.ends_with('/'))
|
||||||
|
from = from.parent_path();
|
||||||
|
if (fs::is_directory(from))
|
||||||
|
to /= from.filename();
|
||||||
|
|
||||||
|
fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
|
||||||
|
}
|
||||||
else
|
else
|
||||||
IDisk::copy(from_path, to_disk, to_path); /// Copy files through buffers.
|
IDisk::copy(from_path, to_disk, to_path); /// Copy files through buffers.
|
||||||
}
|
}
|
||||||
|
|
||||||
SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
|
SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
|
||||||
{
|
{
|
||||||
return std::make_unique<LocalDirectorySyncGuard>(disk_path + path);
|
return std::make_unique<LocalDirectorySyncGuard>(fs::path(disk_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
DiskPtr DiskLocalReservation::getDisk(size_t i) const
|
DiskPtr DiskLocalReservation::getDisk(size_t i) const
|
||||||
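In the copy() rewrite above, the destination gets from.filename() appended when the source is a directory, so that fs::copy ends up copying the directory into the target directory, which is the behaviour the appended filename() suggests the old Poco::File::copyTo call provided. A small standalone illustration of that adjustment (made-up paths, not part of the commit):

#include <filesystem>
#include <fstream>

namespace fs = std::filesystem;

int main()
{
    fs::path base = fs::temp_directory_path() / "copy_demo";   // made-up sandbox
    fs::create_directories(base / "src/part_0");
    std::ofstream(base / "src/part_0/checksums.txt") << "demo";

    fs::path from = base / "src";
    fs::path to = base / "dst";
    fs::create_directories(to);

    // The adjustment from the hunk: copy the directory *into* the target directory
    // rather than onto the target path itself.
    if (fs::is_directory(from))
        to /= from.filename();

    fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing);

    // Now base/"dst/src/part_0/checksums.txt" exists.
    bool ok = fs::exists(base / "dst/src/part_0/checksums.txt");
    fs::remove_all(base);
    return ok ? 0 : 1;
}
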
@ -381,10 +385,8 @@ void registerDiskLocal(DiskFactory & factory)
|
|||||||
throw Exception("Disk path must end with /. Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
|
throw Exception("Disk path must end with /. Disk " + name, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Poco::File disk{path}; !disk.canRead() || !disk.canWrite())
|
if (!FS::canRead(path) || !FS::canWrite(path))
|
||||||
{
|
|
||||||
throw Exception("There is no RW access to the disk " + name + " (" + path + ")", ErrorCodes::PATH_ACCESS_DENIED);
|
throw Exception("There is no RW access to the disk " + name + " (" + path + ")", ErrorCodes::PATH_ACCESS_DENIED);
|
||||||
}
|
|
||||||
|
|
||||||
bool has_space_ratio = config.has(config_prefix + ".keep_free_space_ratio");
|
bool has_space_ratio = config.has(config_prefix + ".keep_free_space_ratio");
|
||||||
|
|
||||||
|
@ -6,8 +6,6 @@
|
|||||||
#include <IO/ReadBufferFromFileBase.h>
|
#include <IO/ReadBufferFromFileBase.h>
|
||||||
#include <IO/WriteBufferFromFile.h>
|
#include <IO/WriteBufferFromFile.h>
|
||||||
|
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
#include <Poco/File.h>
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -27,7 +25,7 @@ public:
|
|||||||
: name(name_), disk_path(path_), keep_free_space_bytes(keep_free_space_bytes_)
|
: name(name_), disk_path(path_), keep_free_space_bytes(keep_free_space_bytes_)
|
||||||
{
|
{
|
||||||
if (disk_path.back() != '/')
|
if (disk_path.back() != '/')
|
||||||
throw Exception("Disk path must ends with '/', but '" + disk_path + "' doesn't.", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Disk path must end with '/', but '" + disk_path + "' doesn't.", ErrorCodes::LOGICAL_ERROR);
|
||||||
}
|
}
|
||||||
|
|
||||||
const String & getName() const override { return name; }
|
const String & getName() const override { return name; }
|
||||||
|
@ -6,7 +6,6 @@
|
|||||||
#include <IO/WriteBufferFromFileBase.h>
|
#include <IO/WriteBufferFromFileBase.h>
|
||||||
#include <IO/WriteBufferFromString.h>
|
#include <IO/WriteBufferFromString.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Poco/Path.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -24,7 +23,7 @@ namespace ErrorCodes
|
|||||||
class DiskMemoryDirectoryIterator final : public IDiskDirectoryIterator
|
class DiskMemoryDirectoryIterator final : public IDiskDirectoryIterator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
explicit DiskMemoryDirectoryIterator(std::vector<Poco::Path> && dir_file_paths_)
|
explicit DiskMemoryDirectoryIterator(std::vector<fs::path> && dir_file_paths_)
|
||||||
: dir_file_paths(std::move(dir_file_paths_)), iter(dir_file_paths.begin())
|
: dir_file_paths(std::move(dir_file_paths_)), iter(dir_file_paths.begin())
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@ -33,13 +32,13 @@ public:
|
|||||||
|
|
||||||
bool isValid() const override { return iter != dir_file_paths.end(); }
|
bool isValid() const override { return iter != dir_file_paths.end(); }
|
||||||
|
|
||||||
String path() const override { return (*iter).toString(); }
|
String path() const override { return iter->string(); }
|
||||||
|
|
||||||
String name() const override { return (*iter).getFileName(); }
|
String name() const override { return iter->filename(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::vector<Poco::Path> dir_file_paths;
|
std::vector<fs::path> dir_file_paths;
|
||||||
std::vector<Poco::Path>::iterator iter;
|
std::vector<fs::path>::iterator iter;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -268,7 +267,7 @@ DiskDirectoryIteratorPtr DiskMemory::iterateDirectory(const String & path)
|
|||||||
if (!path.empty() && files.find(path) == files.end())
|
if (!path.empty() && files.find(path) == files.end())
|
||||||
throw Exception("Directory '" + path + "' does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
throw Exception("Directory '" + path + "' does not exist", ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
||||||
|
|
||||||
std::vector<Poco::Path> dir_file_paths;
|
std::vector<fs::path> dir_file_paths;
|
||||||
for (const auto & file : files)
|
for (const auto & file : files)
|
||||||
if (parentPath(file.first) == path)
|
if (parentPath(file.first) == path)
|
||||||
dir_file_paths.emplace_back(file.first);
|
dir_file_paths.emplace_back(file.first);
|
||||||
|
@ -172,8 +172,8 @@ void registerDiskHDFS(DiskFactory & factory)
|
|||||||
const String & config_prefix,
|
const String & config_prefix,
|
||||||
ContextConstPtr context_) -> DiskPtr
|
ContextConstPtr context_) -> DiskPtr
|
||||||
{
|
{
|
||||||
Poco::File disk{context_->getPath() + "disks/" + name};
|
fs::path disk = fs::path(context_->getPath()) / "disks" / name;
|
||||||
disk.createDirectories();
|
fs::create_directories(disk);
|
||||||
|
|
||||||
String uri{config.getString(config_prefix + ".endpoint")};
|
String uri{config.getString(config_prefix + ".endpoint")};
|
||||||
|
|
||||||
|
@ -22,7 +22,8 @@ bool IDisk::isDirectoryEmpty(const String & path)
|
|||||||
|
|
||||||
void copyFile(IDisk & from_disk, const String & from_path, IDisk & to_disk, const String & to_path)
|
void copyFile(IDisk & from_disk, const String & from_path, IDisk & to_disk, const String & to_path)
|
||||||
{
|
{
|
||||||
LOG_DEBUG(&Poco::Logger::get("IDisk"), "Copying from {} {} to {} {}.", from_disk.getName(), from_path, to_disk.getName(), to_path);
|
LOG_DEBUG(&Poco::Logger::get("IDisk"), "Copying from {} (path: {}) {} to {} (path: {}) {}.",
|
||||||
|
from_disk.getName(), from_disk.getPath(), from_path, to_disk.getName(), to_disk.getPath(), to_path);
|
||||||
|
|
||||||
auto in = from_disk.readFile(from_path);
|
auto in = from_disk.readFile(from_path);
|
||||||
auto out = to_disk.writeFile(to_path);
|
auto out = to_disk.writeFile(to_path);
|
||||||
@ -41,16 +42,15 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p
|
|||||||
[&from_disk, from_path, &to_disk, to_path]()
|
[&from_disk, from_path, &to_disk, to_path]()
|
||||||
{
|
{
|
||||||
setThreadName("DiskCopier");
|
setThreadName("DiskCopier");
|
||||||
DB::copyFile(from_disk, from_path, to_disk, to_path + fileName(from_path));
|
DB::copyFile(from_disk, from_path, to_disk, fs::path(to_path) / fileName(from_path));
|
||||||
});
|
});
|
||||||
|
|
||||||
results.push_back(std::move(result));
|
results.push_back(std::move(result));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
Poco::Path path(from_path);
|
fs::path dir_name = fs::path(from_path).parent_path().filename();
|
||||||
const String & dir_name = path.directory(path.depth() - 1);
|
fs::path dest(fs::path(to_path) / dir_name);
|
||||||
const String dest = to_path + dir_name + "/";
|
|
||||||
to_disk.createDirectories(dest);
|
to_disk.createDirectories(dest);
|
||||||
|
|
||||||
for (auto it = from_disk.iterateDirectory(from_path); it->isValid(); it->next())
|
for (auto it = from_disk.iterateDirectory(from_path); it->isValid(); it->next())
|
||||||
|
@ -7,16 +7,16 @@
|
|||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Disks/Executor.h>
|
#include <Disks/Executor.h>
|
||||||
#include <Disks/DiskType.h>
|
#include <Disks/DiskType.h>
|
||||||
#include "Disks/Executor.h"
|
|
||||||
|
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
#include <utility>
|
#include <utility>
|
||||||
#include <boost/noncopyable.hpp>
|
#include <boost/noncopyable.hpp>
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Poco/Timestamp.h>
|
#include <Poco/Timestamp.h>
|
||||||
|
#include <filesystem>
|
||||||
#include "Poco/Util/AbstractConfiguration.h"
|
#include "Poco/Util/AbstractConfiguration.h"
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
@ -294,25 +294,27 @@ public:
|
|||||||
/// Return full path to a file on disk.
|
/// Return full path to a file on disk.
|
||||||
inline String fullPath(const DiskPtr & disk, const String & path)
|
inline String fullPath(const DiskPtr & disk, const String & path)
|
||||||
{
|
{
|
||||||
return disk->getPath() + path;
|
return fs::path(disk->getPath()) / path;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return parent path for the specified path.
|
/// Return parent path for the specified path.
|
||||||
inline String parentPath(const String & path)
|
inline String parentPath(const String & path)
|
||||||
{
|
{
|
||||||
return Poco::Path(path).parent().toString();
|
if (path.ends_with('/'))
|
||||||
|
return fs::path(path).parent_path().parent_path() / "";
|
||||||
|
return fs::path(path).parent_path() / "";
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return file name for the specified path.
|
/// Return file name for the specified path.
|
||||||
inline String fileName(const String & path)
|
inline String fileName(const String & path)
|
||||||
{
|
{
|
||||||
return Poco::Path(path).getFileName();
|
return fs::path(path).filename();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return directory path for the specified path.
|
/// Return directory path for the specified path.
|
||||||
inline String directoryPath(const String & path)
|
inline String directoryPath(const String & path)
|
||||||
{
|
{
|
||||||
return Poco::Path(path).setFileName("").toString();
|
return fs::path(path).parent_path() / "";
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
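The new parentPath above special-cases a trailing '/', because for std::filesystem a path like "data/table/" has an empty filename, so a single parent_path() would only strip that empty element. A standalone re-implementation of the helper with a couple of checks (illustrative only; POSIX separators and C++20 assumed):

#include <cassert>
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// Local re-implementation of the parentPath helper from the hunk above, for demonstration only.
std::string parentPath(const std::string & path)
{
    if (path.ends_with('/'))
        return fs::path(path).parent_path().parent_path() / "";
    return fs::path(path).parent_path() / "";
}

int main()
{
    // "data/table/part_1" -> "data/table/"
    assert(parentPath("data/table/part_1") == "data/table/");

    // Without the ends_with('/') branch this would stay at "data/table",
    // because parent_path() of "data/table/" only removes the empty filename.
    assert(parentPath("data/table/") == "data/");
}
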
@ -6,12 +6,12 @@
|
|||||||
#include <IO/WriteBufferFromFile.h>
|
#include <IO/WriteBufferFromFile.h>
|
||||||
#include <IO/WriteBufferFromS3.h>
|
#include <IO/WriteBufferFromS3.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Common/createHardLink.h>
|
#include <Common/createHardLink.h>
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
#include <Common/checkStackSize.h>
|
#include <Common/checkStackSize.h>
|
||||||
#include <boost/algorithm/string.hpp>
|
#include <boost/algorithm/string.hpp>
|
||||||
|
#include <Common/filesystemHelpers.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -179,9 +179,9 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths
|
|||||||
{
|
{
|
||||||
LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_path + path));
|
LOG_DEBUG(log, "Remove file by path: {}", backQuote(metadata_path + path));
|
||||||
|
|
||||||
Poco::File file(metadata_path + path);
|
fs::path file(metadata_path + path);
|
||||||
|
|
||||||
if (!file.isFile())
|
if (!fs::is_regular_file(file))
|
||||||
throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path);
|
throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path);
|
||||||
|
|
||||||
try
|
try
|
||||||
@ -191,7 +191,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths
|
|||||||
/// If there is no references - delete content from remote FS.
|
/// If there is no references - delete content from remote FS.
|
||||||
if (metadata.ref_count == 0)
|
if (metadata.ref_count == 0)
|
||||||
{
|
{
|
||||||
file.remove();
|
fs::remove(file);
|
||||||
for (const auto & [remote_fs_object_path, _] : metadata.remote_fs_objects)
|
for (const auto & [remote_fs_object_path, _] : metadata.remote_fs_objects)
|
||||||
fs_paths_keeper->addPath(remote_fs_root_path + remote_fs_object_path);
|
fs_paths_keeper->addPath(remote_fs_root_path + remote_fs_object_path);
|
||||||
}
|
}
|
||||||
@ -199,7 +199,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths
|
|||||||
{
|
{
|
||||||
--metadata.ref_count;
|
--metadata.ref_count;
|
||||||
metadata.save();
|
metadata.save();
|
||||||
file.remove();
|
fs::remove(file);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (const Exception & e)
|
catch (const Exception & e)
|
||||||
@ -210,7 +210,7 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths
|
|||||||
LOG_WARNING(log,
|
LOG_WARNING(log,
|
||||||
"Metadata file {} can't be read by reason: {}. Removing it forcibly.",
|
"Metadata file {} can't be read by reason: {}. Removing it forcibly.",
|
||||||
backQuote(path), e.nested() ? e.nested()->message() : e.message());
|
backQuote(path), e.nested() ? e.nested()->message() : e.message());
|
||||||
file.remove();
|
fs::remove(file);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
throw;
|
throw;
|
||||||
@ -222,8 +222,8 @@ void IDiskRemote::removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr
|
|||||||
{
|
{
|
||||||
checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks.
|
checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks.
|
||||||
|
|
||||||
Poco::File file(metadata_path + path);
|
fs::path file = fs::path(metadata_path) / path;
|
||||||
if (file.isFile())
|
if (fs::is_regular_file(file))
|
||||||
{
|
{
|
||||||
removeMeta(path, fs_paths_keeper);
|
removeMeta(path, fs_paths_keeper);
|
||||||
}
|
}
|
||||||
@ -231,7 +231,7 @@ void IDiskRemote::removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr
|
|||||||
{
|
{
|
||||||
for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
|
for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
|
||||||
removeMetaRecursive(it->path(), fs_paths_keeper);
|
removeMetaRecursive(it->path(), fs_paths_keeper);
|
||||||
file.remove();
|
fs::remove(file);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -296,13 +296,13 @@ IDiskRemote::IDiskRemote(
|
|||||||
|
|
||||||
bool IDiskRemote::exists(const String & path) const
|
bool IDiskRemote::exists(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(metadata_path + path).exists();
|
return fs::exists(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
bool IDiskRemote::isFile(const String & path) const
|
bool IDiskRemote::isFile(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(metadata_path + path).isFile();
|
return fs::is_regular_file(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -326,7 +326,7 @@ void IDiskRemote::moveFile(const String & from_path, const String & to_path)
|
|||||||
if (exists(to_path))
|
if (exists(to_path))
|
||||||
throw Exception("File already exists: " + to_path, ErrorCodes::FILE_ALREADY_EXISTS);
|
throw Exception("File already exists: " + to_path, ErrorCodes::FILE_ALREADY_EXISTS);
|
||||||
|
|
||||||
Poco::File(metadata_path + from_path).renameTo(metadata_path + to_path);
|
fs::rename(fs::path(metadata_path) / from_path, fs::path(metadata_path) / to_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -347,7 +347,7 @@ void IDiskRemote::replaceFile(const String & from_path, const String & to_path)
|
|||||||
void IDiskRemote::removeFileIfExists(const String & path)
|
void IDiskRemote::removeFileIfExists(const String & path)
|
||||||
{
|
{
|
||||||
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
|
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
|
||||||
if (Poco::File(metadata_path + path).exists())
|
if (fs::exists(fs::path(metadata_path) / path))
|
||||||
{
|
{
|
||||||
removeMeta(path, fs_paths_keeper);
|
removeMeta(path, fs_paths_keeper);
|
||||||
removeFromRemoteFS(fs_paths_keeper);
|
removeFromRemoteFS(fs_paths_keeper);
|
||||||
@ -385,19 +385,19 @@ void IDiskRemote::setReadOnly(const String & path)
|
|||||||
|
|
||||||
bool IDiskRemote::isDirectory(const String & path) const
|
bool IDiskRemote::isDirectory(const String & path) const
|
||||||
{
|
{
|
||||||
return Poco::File(metadata_path + path).isDirectory();
|
return fs::is_directory(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void IDiskRemote::createDirectory(const String & path)
|
void IDiskRemote::createDirectory(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(metadata_path + path).createDirectory();
|
fs::create_directory(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void IDiskRemote::createDirectories(const String & path)
|
void IDiskRemote::createDirectories(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(metadata_path + path).createDirectories();
|
fs::create_directories(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -411,7 +411,7 @@ void IDiskRemote::clearDirectory(const String & path)
|
|||||||
|
|
||||||
void IDiskRemote::removeDirectory(const String & path)
|
void IDiskRemote::removeDirectory(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(metadata_path + path).remove();
|
fs::remove(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -430,13 +430,13 @@ void IDiskRemote::listFiles(const String & path, std::vector<String> & file_name
|
|||||||
|
|
||||||
void IDiskRemote::setLastModified(const String & path, const Poco::Timestamp & timestamp)
|
void IDiskRemote::setLastModified(const String & path, const Poco::Timestamp & timestamp)
|
||||||
{
|
{
|
||||||
Poco::File(metadata_path + path).setLastModified(timestamp);
|
FS::setModificationTime(fs::path(metadata_path) / path, timestamp.epochTime());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Poco::Timestamp IDiskRemote::getLastModified(const String & path)
|
Poco::Timestamp IDiskRemote::getLastModified(const String & path)
|
||||||
{
|
{
|
||||||
return Poco::File(metadata_path + path).getLastModified();
|
return FS::getModificationTimestamp(fs::path(metadata_path) / path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -4,11 +4,12 @@
|
|||||||
#include <atomic>
|
#include <atomic>
|
||||||
#include "Disks/DiskFactory.h"
|
#include "Disks/DiskFactory.h"
|
||||||
#include "Disks/Executor.h"
|
#include "Disks/Executor.h"
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
#include <utility>
|
#include <utility>
|
||||||
#include <Common/MultiVersion.h>
|
#include <Common/MultiVersion.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -193,21 +194,21 @@ public:
|
|||||||
|
|
||||||
void next() override { ++iter; }
|
void next() override { ++iter; }
|
||||||
|
|
||||||
bool isValid() const override { return iter != Poco::DirectoryIterator(); }
|
bool isValid() const override { return iter != fs::directory_iterator(); }
|
||||||
|
|
||||||
String path() const override
|
String path() const override
|
||||||
{
|
{
|
||||||
if (iter->isDirectory())
|
if (fs::is_directory(iter->path()))
|
||||||
return folder_path + iter.name() + '/';
|
return folder_path / iter->path().filename().string() / "";
|
||||||
else
|
else
|
||||||
return folder_path + iter.name();
|
return folder_path / iter->path().filename().string();
|
||||||
}
|
}
|
||||||
|
|
||||||
String name() const override { return iter.name(); }
|
String name() const override { return iter->path().filename(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
Poco::DirectoryIterator iter;
|
fs::directory_iterator iter;
|
||||||
String folder_path;
|
fs::path folder_path;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@ -21,6 +21,7 @@ ReadIndirectBufferFromRemoteFS<T>::ReadIndirectBufferFromRemoteFS(
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
off_t ReadIndirectBufferFromRemoteFS<T>::seek(off_t offset_, int whence)
|
off_t ReadIndirectBufferFromRemoteFS<T>::seek(off_t offset_, int whence)
|
||||||
{
|
{
|
||||||
|
@ -14,7 +14,6 @@
|
|||||||
#include <IO/SeekAvoidingReadBuffer.h>
|
#include <IO/SeekAvoidingReadBuffer.h>
|
||||||
#include <IO/WriteBufferFromS3.h>
|
#include <IO/WriteBufferFromS3.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Common/createHardLink.h>
|
#include <Common/createHardLink.h>
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
#include <Common/thread_local_rng.h>
|
#include <Common/thread_local_rng.h>
|
||||||
@ -215,7 +214,7 @@ void DiskS3::moveFile(const String & from_path, const String & to_path, bool sen
|
|||||||
createFileOperationObject("rename", revision, object_metadata);
|
createFileOperationObject("rename", revision, object_metadata);
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::File(metadata_path + from_path).renameTo(metadata_path + to_path);
|
fs::rename(fs::path(metadata_path) / from_path, fs::path(metadata_path) / to_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, size_t buf_size, size_t, size_t, size_t, MMappedFileCache *) const
|
std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, size_t buf_size, size_t, size_t, size_t, MMappedFileCache *) const
|
||||||
@ -675,8 +674,8 @@ void DiskS3::restore()
|
|||||||
restoreFiles(information);
|
restoreFiles(information);
|
||||||
restoreFileOperations(information);
|
restoreFileOperations(information);
|
||||||
|
|
||||||
Poco::File restore_file(metadata_path + RESTORE_FILE_NAME);
|
fs::path restore_file = fs::path(metadata_path) / RESTORE_FILE_NAME;
|
||||||
restore_file.remove();
|
fs::remove(restore_file);
|
||||||
|
|
||||||
saveSchemaVersion(RESTORABLE_SCHEMA_VERSION);
|
saveSchemaVersion(RESTORABLE_SCHEMA_VERSION);
|
||||||
|
|
||||||
@ -863,8 +862,9 @@ void DiskS3::restoreFileOperations(const RestoreInformation & restore_informatio
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
/// Skip not finished parts. They shouldn't be in 'detached' directory, because CH wouldn't be able to finish processing them.
|
/// Skip not finished parts. They shouldn't be in 'detached' directory, because CH wouldn't be able to finish processing them.
|
||||||
Poco::Path directory_path (path);
|
fs::path directory_path(path);
|
||||||
auto directory_name = directory_path.directory(directory_path.depth() - 1);
|
auto directory_name = directory_path.parent_path().filename().string();
|
||||||
|
|
||||||
auto predicate = [&directory_name](String & prefix) { return directory_name.starts_with(prefix); };
|
auto predicate = [&directory_name](String & prefix) { return directory_name.starts_with(prefix); };
|
||||||
if (std::any_of(not_finished_prefixes.begin(), not_finished_prefixes.end(), predicate))
|
if (std::any_of(not_finished_prefixes.begin(), not_finished_prefixes.end(), predicate))
|
||||||
continue;
|
continue;
|
||||||
@ -873,7 +873,14 @@ void DiskS3::restoreFileOperations(const RestoreInformation & restore_informatio
|
|||||||
|
|
||||||
LOG_DEBUG(log, "Move directory to 'detached' {} -> {}", path, detached_path);
|
LOG_DEBUG(log, "Move directory to 'detached' {} -> {}", path, detached_path);
|
||||||
|
|
||||||
Poco::File(metadata_path + path).moveTo(metadata_path + detached_path);
|
fs::path from_path = fs::path(metadata_path) / path;
|
||||||
|
fs::path to_path = fs::path(metadata_path) / detached_path;
|
||||||
|
if (path.ends_with('/'))
|
||||||
|
to_path /= from_path.parent_path().filename();
|
||||||
|
else
|
||||||
|
to_path /= from_path.filename();
|
||||||
|
fs::copy(from_path, to_path, fs::copy_options::recursive | fs::copy_options::overwrite_existing);
|
||||||
|
fs::remove_all(from_path);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -905,7 +912,9 @@ String DiskS3::revisionToString(UInt64 revision)
|
|||||||
|
|
||||||
String DiskS3::pathToDetached(const String & source_path)
|
String DiskS3::pathToDetached(const String & source_path)
|
||||||
{
|
{
|
||||||
return Poco::Path(source_path).parent().append(Poco::Path("detached")).toString() + '/';
|
if (source_path.ends_with('/'))
|
||||||
|
return fs::path(source_path).parent_path().parent_path() / "detached/";
|
||||||
|
return fs::path(source_path).parent_path() / "detached/";
|
||||||
}
|
}
|
||||||
|
|
||||||
void DiskS3::onFreeze(const String & path)
|
void DiskS3::onFreeze(const String & path)
|
||||||
|
@ -174,7 +174,7 @@ void registerDiskS3(DiskFactory & factory)
|
|||||||
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
|
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
String metadata_path = config.getString(config_prefix + ".metadata_path", context->getPath() + "disks/" + name + "/");
|
String metadata_path = config.getString(config_prefix + ".metadata_path", context->getPath() + "disks/" + name + "/");
|
||||||
Poco::File (metadata_path).createDirectories();
|
fs::create_directories(metadata_path);
|
||||||
|
|
||||||
std::shared_ptr<IDisk> s3disk = std::make_shared<DiskS3>(
|
std::shared_ptr<IDisk> s3disk = std::make_shared<DiskS3>(
|
||||||
name,
|
name,
|
||||||
|
@ -8,8 +8,6 @@
|
|||||||
|
|
||||||
#include <set>
|
#include <set>
|
||||||
|
|
||||||
#include <Poco/File.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
|
@ -8,7 +8,6 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
/// Stores data in S3/HDFS and adds the object key (S3 path) and object size to metadata file on local FS.
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
WriteIndirectBufferFromRemoteFS<T>::WriteIndirectBufferFromRemoteFS(
|
WriteIndirectBufferFromRemoteFS<T>::WriteIndirectBufferFromRemoteFS(
|
||||||
std::unique_ptr<T> impl_,
|
std::unique_ptr<T> impl_,
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
/// Stores data in S3/HDFS and adds the object key (S3 path) and object size to metadata file on local FS.
|
/// Stores data in S3/HDFS and adds the object path and object size to metadata file on local FS.
|
||||||
template <typename T>
|
template <typename T>
|
||||||
class WriteIndirectBufferFromRemoteFS final : public WriteBufferFromFileDecorator
|
class WriteIndirectBufferFromRemoteFS final : public WriteBufferFromFileDecorator
|
||||||
{
|
{
|
||||||
|
@ -2,6 +2,9 @@
|
|||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include "gtest_disk.h"
|
#include "gtest_disk.h"
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
|
|
||||||
#if !defined(__clang__)
|
#if !defined(__clang__)
|
||||||
@ -22,7 +25,7 @@ DB::DiskPtr createDisk<DB::DiskMemory>()
|
|||||||
template <>
|
template <>
|
||||||
DB::DiskPtr createDisk<DB::DiskLocal>()
|
DB::DiskPtr createDisk<DB::DiskLocal>()
|
||||||
{
|
{
|
||||||
Poco::File("tmp/").createDirectory();
|
fs::create_directory("tmp/");
|
||||||
return std::make_shared<DB::DiskLocal>("local_disk", "tmp/", 0);
|
return std::make_shared<DB::DiskLocal>("local_disk", "tmp/", 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -43,7 +46,7 @@ template <>
|
|||||||
void destroyDisk<DB::DiskLocal>(DB::DiskPtr & disk)
|
void destroyDisk<DB::DiskLocal>(DB::DiskPtr & disk)
|
||||||
{
|
{
|
||||||
disk.reset();
|
disk.reset();
|
||||||
Poco::File("tmp/").remove(true);
|
fs::remove_all("tmp/");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
#include <Formats/FormatSchemaInfo.h>
|
#include <Formats/FormatSchemaInfo.h>
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -11,6 +11,7 @@ namespace ErrorCodes
|
|||||||
extern const int BAD_ARGUMENTS;
|
extern const int BAD_ARGUMENTS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
@ -34,55 +35,66 @@ FormatSchemaInfo::FormatSchemaInfo(const String & format_schema, const String &
|
|||||||
|
|
||||||
String default_file_extension = getFormatSchemaDefaultFileExtension(format);
|
String default_file_extension = getFormatSchemaDefaultFileExtension(format);
|
||||||
|
|
||||||
Poco::Path path;
|
fs::path path;
|
||||||
if (require_message)
|
if (require_message)
|
||||||
{
|
{
|
||||||
size_t colon_pos = format_schema.find(':');
|
size_t colon_pos = format_schema.find(':');
|
||||||
if ((colon_pos == String::npos) || (colon_pos == 0) || (colon_pos == format_schema.length() - 1)
|
if ((colon_pos == String::npos) || (colon_pos == 0) || (colon_pos == format_schema.length() - 1))
|
||||||
|| path.assign(format_schema.substr(0, colon_pos)).makeFile().getFileName().empty())
|
|
||||||
{
|
{
|
||||||
throw Exception(
|
throw Exception(
|
||||||
"Format schema requires the 'format_schema' setting to have the 'schema_file:message_name' format"
|
"Format schema requires the 'format_schema' setting to have the 'schema_file:message_name' format"
|
||||||
+ (default_file_extension.empty() ? "" : ", e.g. 'schema." + default_file_extension + ":Message'") +
|
+ (default_file_extension.empty() ? "" : ", e.g. 'schema." + default_file_extension + ":Message'") +
|
||||||
". Got '" + format_schema
|
". Got '" + format_schema + "'", ErrorCodes::BAD_ARGUMENTS);
|
||||||
+ "'",
|
}
|
||||||
ErrorCodes::BAD_ARGUMENTS);
|
else
|
||||||
|
{
|
||||||
|
path = fs::path(format_schema.substr(0, colon_pos));
|
||||||
|
String filename = path.has_filename() ? path.filename() : path.parent_path().filename();
|
||||||
|
if (filename.empty())
|
||||||
|
throw Exception(
|
||||||
|
"Format schema requires the 'format_schema' setting to have the 'schema_file:message_name' format"
|
||||||
|
+ (default_file_extension.empty() ? "" : ", e.g. 'schema." + default_file_extension + ":Message'") +
|
||||||
|
". Got '" + format_schema + "'", ErrorCodes::BAD_ARGUMENTS);
|
||||||
}
|
}
|
||||||
|
|
||||||
message_name = format_schema.substr(colon_pos + 1);
|
message_name = format_schema.substr(colon_pos + 1);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
path.assign(format_schema).makeFile().getFileName();
|
{
|
||||||
|
path = fs::path(format_schema);
|
||||||
|
if (!path.has_filename())
|
||||||
|
path = path.parent_path() / "";
|
||||||
|
}
|
||||||
|
|
||||||
auto default_schema_directory = [&format_schema_path]()
|
auto default_schema_directory = [&format_schema_path]()
|
||||||
{
|
{
|
||||||
static const String str = Poco::Path(format_schema_path).makeAbsolute().makeDirectory().toString();
|
static const String str = fs::canonical(format_schema_path) / "";
|
||||||
return str;
|
return str;
|
||||||
};
|
};
|
||||||
|
|
||||||
if (path.getExtension().empty() && !default_file_extension.empty())
|
if (!path.has_extension() && !default_file_extension.empty())
|
||||||
path.setExtension(default_file_extension);
|
path = path.parent_path() / (path.stem().string() + '.' + default_file_extension);
|
||||||
|
|
||||||
if (path.isAbsolute())
|
fs::path default_schema_directory_path(default_schema_directory());
|
||||||
|
if (path.is_absolute())
|
||||||
{
|
{
|
||||||
if (is_server)
|
if (is_server)
|
||||||
throw Exception("Absolute path in the 'format_schema' setting is prohibited: " + path.toString(), ErrorCodes::BAD_ARGUMENTS);
|
throw Exception("Absolute path in the 'format_schema' setting is prohibited: " + path.string(), ErrorCodes::BAD_ARGUMENTS);
|
||||||
schema_path = path.getFileName();
|
schema_path = path.filename();
|
||||||
schema_directory = path.makeParent().toString();
|
schema_directory = path.parent_path() / "";
|
||||||
}
|
}
|
||||||
else if (path.depth() >= 1 && path.directory(0) == "..")
|
else if (path.has_parent_path() && !fs::weakly_canonical(default_schema_directory_path / path).string().starts_with(fs::weakly_canonical(default_schema_directory_path).string()))
|
||||||
{
|
{
|
||||||
if (is_server)
|
if (is_server)
|
||||||
throw Exception(
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Path in the 'format_schema' setting shouldn't go outside the 'format_schema_path' directory: " + path.toString(),
|
"Path in the 'format_schema' setting shouldn't go outside the 'format_schema_path' directory: {} ({} not in {})",
|
||||||
ErrorCodes::BAD_ARGUMENTS);
|
path.string());
|
||||||
path = Poco::Path(default_schema_directory()).resolve(path).toString();
|
path = default_schema_directory_path / path;
|
||||||
schema_path = path.getFileName();
|
schema_path = path.filename();
|
||||||
schema_directory = path.makeParent().toString();
|
schema_directory = path.parent_path() / "";
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
schema_path = path.toString();
|
schema_path = path;
|
||||||
schema_directory = default_schema_directory();
|
schema_directory = default_schema_directory();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
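The rewritten check above decides whether the schema file escapes the format_schema_path directory by normalising both paths with fs::weakly_canonical and testing a string prefix, instead of Poco's inspection of the leading ".." component. A standalone sketch of the same idea (directory and file names are made up):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

// True if `candidate`, resolved against `base`, still lies inside `base` after normalisation.
// Mirrors the weakly_canonical + starts_with test from the hunk above.
bool staysInside(const fs::path & base, const fs::path & candidate)
{
    const auto base_norm = fs::weakly_canonical(base).string();
    const auto full_norm = fs::weakly_canonical(base / candidate).string();
    return full_norm.starts_with(base_norm);
}

int main()
{
    const fs::path schema_dir = "/var/lib/clickhouse/format_schemas";   // made-up directory

    std::cout << staysInside(schema_dir, "schema.capnp") << '\n';          // 1: stays inside
    std::cout << staysInside(schema_dir, "sub/dir/schema.proto") << '\n';  // 1: stays inside
    std::cout << staysInside(schema_dir, "../secrets.xml") << '\n';        // 0: escapes the directory
}
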
@ -3,10 +3,11 @@
|
|||||||
#include <Functions/FunctionFactory.h>
|
#include <Functions/FunctionFactory.h>
|
||||||
#include <DataTypes/DataTypeString.h>
|
#include <DataTypes/DataTypeString.h>
|
||||||
#include <IO/ReadBufferFromFile.h>
|
#include <IO/ReadBufferFromFile.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@@ -68,21 +69,19 @@ public:
 {
 const char * filename = reinterpret_cast<const char *>(&chars[source_offset]);

-const String user_files_path = getContext()->getUserFilesPath();
-String user_files_absolute_path = Poco::Path(user_files_path).makeAbsolute().makeDirectory().toString();
-Poco::Path poco_filepath = Poco::Path(filename);
-if (poco_filepath.isRelative())
-poco_filepath = Poco::Path(user_files_absolute_path, poco_filepath);
-const String file_absolute_path = poco_filepath.absolute().toString();
-checkReadIsAllowedOrThrow(user_files_absolute_path, file_absolute_path);
+fs::path user_files_absolute_path = fs::canonical(fs::path(getContext()->getUserFilesPath()));
+fs::path file_path(filename);
+if (file_path.is_relative())
+file_path = user_files_absolute_path / file_path;
+fs::path file_absolute_path = fs::canonical(file_path);
+checkReadIsAllowedOrThrow(user_files_absolute_path.string(), file_absolute_path);

-checked_filenames[row] = file_absolute_path;
-auto file = Poco::File(file_absolute_path);
+checked_filenames[row] = file_absolute_path.string();

-if (!file.exists())
-throw Exception(fmt::format("File {} doesn't exist.", file_absolute_path), ErrorCodes::FILE_DOESNT_EXIST);
+if (!fs::exists(file_absolute_path))
+throw Exception(fmt::format("File {} doesn't exist.", file_absolute_path.string()), ErrorCodes::FILE_DOESNT_EXIST);

-const auto current_file_size = Poco::File(file_absolute_path).getSize();
+const auto current_file_size = fs::file_size(file_absolute_path);

 result_offset += current_file_size + 1;
 res_offsets[row] = result_offset;
@@ -117,8 +116,8 @@ private:
 if (file_absolute_path.find(user_files_absolute_path) != 0)
 throw Exception("File is not inside " + user_files_absolute_path, ErrorCodes::DATABASE_ACCESS_DENIED);

-Poco::File path_poco_file = Poco::File(file_absolute_path);
-if (path_poco_file.exists() && path_poco_file.isDirectory())
+fs::path fs_path(file_absolute_path);
+if (fs::exists(fs_path) && fs::is_directory(fs_path))
 throw Exception("File can't be a directory", ErrorCodes::INCORRECT_FILE_NAME);
 }
 };
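The hunk above resolves the requested file name with fs::canonical and then checks that the result still lies under the user_files directory. A minimal standalone sketch of that containment pattern follows; the helper name, base path and error type are illustrative, not the ClickHouse code, and it uses fs::weakly_canonical so the check also works for paths that do not fully exist yet (fs::canonical requires the target to exist):

    #include <filesystem>
    #include <stdexcept>
    #include <string>

    namespace fs = std::filesystem;

    /// Resolve `name` against `base` and reject anything that escapes `base`.
    /// weakly_canonical collapses "." and ".." and follows symlinks for the
    /// existing part of the path, so "a/../../etc/passwd" cannot sneak past
    /// a plain string concatenation.
    static fs::path resolveUnder(const fs::path & base, const fs::path & name)
    {
        fs::path base_canonical = fs::weakly_canonical(base);
        fs::path resolved = fs::weakly_canonical(name.is_relative() ? base_canonical / name : name);
        if (resolved.string().rfind(base_canonical.string(), 0) != 0)  /// not a prefix of base
            throw std::runtime_error("Path " + resolved.string() + " is outside " + base_canonical.string());
        return resolved;
    }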
@ -1,7 +1,6 @@
|
|||||||
#include <gtest/gtest.h>
|
#include <gtest/gtest.h>
|
||||||
|
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <IO/CascadeWriteBuffer.h>
|
#include <IO/CascadeWriteBuffer.h>
|
||||||
#include <IO/MemoryReadWriteBuffer.h>
|
#include <IO/MemoryReadWriteBuffer.h>
|
||||||
#include <IO/WriteBufferFromTemporaryFile.h>
|
#include <IO/WriteBufferFromTemporaryFile.h>
|
||||||
@ -9,7 +8,9 @@
|
|||||||
#include <IO/ConcatReadBuffer.h>
|
#include <IO/ConcatReadBuffer.h>
|
||||||
#include <IO/copyData.h>
|
#include <IO/copyData.h>
|
||||||
#include <Common/typeid_cast.h>
|
#include <Common/typeid_cast.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
using namespace DB;
|
using namespace DB;
|
||||||
|
|
||||||
|
|
||||||
@ -235,7 +236,7 @@ try
|
|||||||
|
|
||||||
buf.reset();
|
buf.reset();
|
||||||
reread_buf.reset();
|
reread_buf.reset();
|
||||||
ASSERT_TRUE(!Poco::File(tmp_filename).exists());
|
ASSERT_TRUE(!fs::exists(tmp_filename));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
|
@ -74,8 +74,11 @@
|
|||||||
#include <Interpreters/DatabaseCatalog.h>
|
#include <Interpreters/DatabaseCatalog.h>
|
||||||
#include <Storages/MergeTree/BackgroundJobsExecutor.h>
|
#include <Storages/MergeTree/BackgroundJobsExecutor.h>
|
||||||
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>
|
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
{
|
{
|
||||||
extern const Event ContextLock;
|
extern const Event ContextLock;
|
||||||
@ -2212,14 +2215,14 @@ void Context::checkCanBeDropped(const String & database, const String & table, c
|
|||||||
if (!max_size_to_drop || size <= max_size_to_drop)
|
if (!max_size_to_drop || size <= max_size_to_drop)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
Poco::File force_file(getFlagsPath() + "force_drop_table");
|
fs::path force_file(getFlagsPath() + "force_drop_table");
|
||||||
bool force_file_exists = force_file.exists();
|
bool force_file_exists = fs::exists(force_file);
|
||||||
|
|
||||||
if (force_file_exists)
|
if (force_file_exists)
|
||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
force_file.remove();
|
fs::remove(force_file);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
@ -2241,9 +2244,9 @@ void Context::checkCanBeDropped(const String & database, const String & table, c
|
|||||||
"Example:\nsudo touch '{}' && sudo chmod 666 '{}'",
|
"Example:\nsudo touch '{}' && sudo chmod 666 '{}'",
|
||||||
backQuoteIfNeed(database), backQuoteIfNeed(table),
|
backQuoteIfNeed(database), backQuoteIfNeed(table),
|
||||||
size_str, max_size_to_drop_str,
|
size_str, max_size_to_drop_str,
|
||||||
force_file.path(), force_file_exists ? "exists but not writeable (could not be removed)" : "doesn't exist",
|
force_file.string(), force_file_exists ? "exists but not writeable (could not be removed)" : "doesn't exist",
|
||||||
force_file.path(),
|
force_file.string(),
|
||||||
force_file.path(), force_file.path());
|
force_file.string(), force_file.string());
|
||||||
}
|
}
|
||||||
@ -3,6 +3,7 @@
|
|||||||
#include <Core/Types.h>
|
#include <Core/Types.h>
|
||||||
#include <Interpreters/Cluster.h>
|
#include <Interpreters/Cluster.h>
|
||||||
#include <Common/ZooKeeper/Types.h>
|
#include <Common/ZooKeeper/Types.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
namespace Poco
|
namespace Poco
|
||||||
{
|
{
|
||||||
@ -14,6 +15,8 @@ namespace zkutil
|
|||||||
class ZooKeeper;
|
class ZooKeeper;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -100,9 +103,9 @@ struct DDLTaskBase
|
|||||||
|
|
||||||
virtual ContextMutablePtr makeQueryContext(ContextPtr from_context, const ZooKeeperPtr & zookeeper);
|
virtual ContextMutablePtr makeQueryContext(ContextPtr from_context, const ZooKeeperPtr & zookeeper);
|
||||||
|
|
||||||
inline String getActiveNodePath() const { return entry_path + "/active/" + host_id_str; }
|
inline String getActiveNodePath() const { return fs::path(entry_path) / "active" / host_id_str; }
|
||||||
inline String getFinishedNodePath() const { return entry_path + "/finished/" + host_id_str; }
|
inline String getFinishedNodePath() const { return fs::path(entry_path) / "finished" / host_id_str; }
|
||||||
inline String getShardNodePath() const { return entry_path + "/shards/" + getShardID(); }
|
inline String getShardNodePath() const { return fs::path(entry_path) / "shards" / getShardID(); }
|
||||||
|
|
||||||
static String getLogEntryName(UInt32 log_entry_number);
|
static String getLogEntryName(UInt32 log_entry_number);
|
||||||
static UInt32 getLogEntryNumber(const String & log_entry_name);
|
static UInt32 getLogEntryNumber(const String & log_entry_name);
|
||||||
|
@ -5,7 +5,6 @@
|
|||||||
#include <Databases/IDatabase.h>
|
#include <Databases/IDatabase.h>
|
||||||
#include <Databases/DatabaseMemory.h>
|
#include <Databases/DatabaseMemory.h>
|
||||||
#include <Databases/DatabaseOnDisk.h>
|
#include <Databases/DatabaseOnDisk.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Common/quoteString.h>
|
#include <Common/quoteString.h>
|
||||||
#include <Storages/StorageMemory.h>
|
#include <Storages/StorageMemory.h>
|
||||||
#include <Storages/LiveView/TemporaryLiveViewCleaner.h>
|
#include <Storages/LiveView/TemporaryLiveViewCleaner.h>
|
||||||
@ -17,6 +16,8 @@
|
|||||||
#include <Common/CurrentMetrics.h>
|
#include <Common/CurrentMetrics.h>
|
||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
#include <Poco/Util/AbstractConfiguration.h>
|
#include <Poco/Util/AbstractConfiguration.h>
|
||||||
|
#include <filesystem>
|
||||||
|
#include <Common/filesystemHelpers.h>
|
||||||
|
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
# include "config_core.h"
|
# include "config_core.h"
|
||||||
@ -27,8 +28,7 @@
|
|||||||
# include <Storages/StorageMaterializeMySQL.h>
|
# include <Storages/StorageMaterializeMySQL.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#include <filesystem>
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
@@ -354,10 +354,9 @@ DatabasePtr DatabaseCatalog::detachDatabase(const String & database_name, bool d
 db->drop(getContext());

 /// Old ClickHouse versions did not store database.sql files
-Poco::File database_metadata_file(
-getContext()->getPath() + "metadata/" + escapeForFileName(database_name) + ".sql");
-if (database_metadata_file.exists())
-database_metadata_file.remove(false);
+fs::path database_metadata_file = fs::path(getContext()->getPath()) / "metadata" / (escapeForFileName(database_name) + ".sql");
+if (fs::exists(database_metadata_file))
+fs::remove(database_metadata_file);
 }

 return db;
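Unlike the Poco::File methods being replaced, the std::filesystem free functions used above come in two flavours: a throwing overload that reports failures as fs::filesystem_error, and an overload taking std::error_code that never throws; fs::remove also simply returns false when the file is already gone rather than failing. A small generic illustration, not code from this commit (the path is hypothetical):

    #include <filesystem>
    #include <iostream>
    #include <system_error>

    namespace fs = std::filesystem;

    int main()
    {
        fs::path metadata = "metadata/example.sql";  /// hypothetical path

        try
        {
            if (fs::exists(metadata))
                fs::remove(metadata);        /// throws fs::filesystem_error on e.g. permission errors
        }
        catch (const fs::filesystem_error & e)
        {
            std::cerr << e.what() << '\n';
        }

        std::error_code ec;                  /// non-throwing variant
        fs::remove(metadata, ec);
        if (ec)
            std::cerr << ec.message() << '\n';
    }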
@ -783,7 +782,7 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
|
|||||||
}
|
}
|
||||||
|
|
||||||
addUUIDMapping(table_id.uuid);
|
addUUIDMapping(table_id.uuid);
|
||||||
drop_time = Poco::File(dropped_metadata_path).getLastModified().epochTime();
|
drop_time = FS::getModificationTime(dropped_metadata_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::lock_guard lock(tables_marked_dropped_mutex);
|
std::lock_guard lock(tables_marked_dropped_mutex);
|
||||||
@ -892,16 +891,15 @@ void DatabaseCatalog::dropTableFinally(const TableMarkedAsDropped & table)
|
|||||||
|
|
||||||
/// Even if table is not loaded, try remove its data from disk.
|
/// Even if table is not loaded, try remove its data from disk.
|
||||||
/// TODO remove data from all volumes
|
/// TODO remove data from all volumes
|
||||||
String data_path = getContext()->getPath() + "store/" + getPathForUUID(table.table_id.uuid);
|
fs::path data_path = fs::path(getContext()->getPath()) / "store" / getPathForUUID(table.table_id.uuid);
|
||||||
Poco::File table_data_dir{data_path};
|
if (fs::exists(data_path))
|
||||||
if (table_data_dir.exists())
|
|
||||||
{
|
{
|
||||||
LOG_INFO(log, "Removing data directory {} of dropped table {}", data_path, table.table_id.getNameForLogs());
|
LOG_INFO(log, "Removing data directory {} of dropped table {}", data_path.string(), table.table_id.getNameForLogs());
|
||||||
table_data_dir.remove(true);
|
fs::remove_all(data_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_INFO(log, "Removing metadata {} of dropped table {}", table.metadata_path, table.table_id.getNameForLogs());
|
LOG_INFO(log, "Removing metadata {} of dropped table {}", table.metadata_path, table.table_id.getNameForLogs());
|
||||||
Poco::File(table.metadata_path).remove();
|
fs::remove(fs::path(table.metadata_path));
|
||||||
|
|
||||||
removeUUIDMappingFinally(table.table_id.uuid);
|
removeUUIDMappingFinally(table.table_id.uuid);
|
||||||
CurrentMetrics::sub(CurrentMetrics::TablesToDropQueueSize, 1);
|
CurrentMetrics::sub(CurrentMetrics::TablesToDropQueueSize, 1);
|
||||||
|
@ -3,12 +3,13 @@
|
|||||||
#include <Common/StringUtils/StringUtils.h>
|
#include <Common/StringUtils/StringUtils.h>
|
||||||
#include <Common/Config/ConfigProcessor.h>
|
#include <Common/Config/ConfigProcessor.h>
|
||||||
#include <Common/getMultipleKeysFromConfig.h>
|
#include <Common/getMultipleKeysFromConfig.h>
|
||||||
|
|
||||||
#include <Poco/Glob.h>
|
#include <Poco/Glob.h>
|
||||||
#include <Poco/File.h>
|
#include <Common/filesystemHelpers.h>
|
||||||
#include <Poco/Path.h>
|
#include <filesystem>
|
||||||
|
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
ExternalLoaderXMLConfigRepository::ExternalLoaderXMLConfigRepository(
|
ExternalLoaderXMLConfigRepository::ExternalLoaderXMLConfigRepository(
|
||||||
@@ -19,7 +20,7 @@ ExternalLoaderXMLConfigRepository::ExternalLoaderXMLConfigRepository(

 Poco::Timestamp ExternalLoaderXMLConfigRepository::getUpdateTime(const std::string & definition_entity_name)
 {
-return Poco::File(definition_entity_name).getLastModified();
+return FS::getModificationTimestamp(definition_entity_name);
 }

 std::set<std::string> ExternalLoaderXMLConfigRepository::getAllLoadablesDefinitionNames()
@@ -36,8 +37,8 @@ std::set<std::string> ExternalLoaderXMLConfigRepository::getAllLoadablesDefiniti
 if (pattern[0] != '/')
 {
 const auto app_config_path = main_config.getString("config-file", "config.xml");
-const auto config_dir = Poco::Path{app_config_path}.parent().toString();
-const auto absolute_path = config_dir + pattern;
+const String config_dir = fs::path(app_config_path).parent_path();
+const String absolute_path = fs::path(config_dir) / pattern;
 Poco::Glob::glob(absolute_path, files, 0);
 if (!files.empty())
 continue;
@ -59,7 +60,7 @@ std::set<std::string> ExternalLoaderXMLConfigRepository::getAllLoadablesDefiniti
|
|||||||
|
|
||||||
bool ExternalLoaderXMLConfigRepository::exists(const std::string & definition_entity_name)
|
bool ExternalLoaderXMLConfigRepository::exists(const std::string & definition_entity_name)
|
||||||
{
|
{
|
||||||
return Poco::File(definition_entity_name).exists();
|
return fs::exists(fs::path(definition_entity_name));
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::AutoPtr<Poco::Util::AbstractConfiguration> ExternalLoaderXMLConfigRepository::load(
|
Poco::AutoPtr<Poco::Util::AbstractConfiguration> ExternalLoaderXMLConfigRepository::load(
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
#include <Poco/File.h>
|
|
||||||
|
|
||||||
#include <Databases/IDatabase.h>
|
#include <Databases/IDatabase.h>
|
||||||
#include <Interpreters/Context.h>
|
#include <Interpreters/Context.h>
|
||||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||||
|
@@ -542,7 +542,7 @@ void InterpreterSystemQuery::dropReplica(ASTSystemQuery & query)
 else if (!query.replica_zk_path.empty())
 {
 getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA);
-auto remote_replica_path = query.replica_zk_path + "/replicas/" + query.replica;
+String remote_replica_path = fs::path(query.replica_zk_path) / "replicas" / query.replica;

 /// This check is actually redundant, but it may prevent from some user mistakes
 for (auto & elem : DatabaseCatalog::instance().getDatabases())
|
@ -18,7 +18,9 @@
|
|||||||
|
|
||||||
#include <Common/typeid_cast.h>
|
#include <Common/typeid_cast.h>
|
||||||
#include <Common/StringUtils/StringUtils.h>
|
#include <Common/StringUtils/StringUtils.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -54,13 +56,13 @@ static void loadDatabase(
|
|||||||
String database_attach_query;
|
String database_attach_query;
|
||||||
String database_metadata_file = database_path + ".sql";
|
String database_metadata_file = database_path + ".sql";
|
||||||
|
|
||||||
if (Poco::File(database_metadata_file).exists())
|
if (fs::exists(fs::path(database_metadata_file)))
|
||||||
{
|
{
|
||||||
/// There is .sql file with database creation statement.
|
/// There is .sql file with database creation statement.
|
||||||
ReadBufferFromFile in(database_metadata_file, 1024);
|
ReadBufferFromFile in(database_metadata_file, 1024);
|
||||||
readStringUntilEOF(database_attach_query, in);
|
readStringUntilEOF(database_attach_query, in);
|
||||||
}
|
}
|
||||||
else if (Poco::File(database_path).exists())
|
else if (fs::exists(fs::path(database_path)))
|
||||||
{
|
{
|
||||||
/// Database exists, but .sql file is absent. It's old-style Ordinary database (e.g. system or default)
|
/// Database exists, but .sql file is absent. It's old-style Ordinary database (e.g. system or default)
|
||||||
database_attach_query = "ATTACH DATABASE " + backQuoteIfNeed(database) + " ENGINE = Ordinary";
|
database_attach_query = "ATTACH DATABASE " + backQuoteIfNeed(database) + " ENGINE = Ordinary";
|
||||||
@ -95,34 +97,35 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam
|
|||||||
* This file is deleted after successful loading of tables.
|
* This file is deleted after successful loading of tables.
|
||||||
* (flag is "one-shot")
|
* (flag is "one-shot")
|
||||||
*/
|
*/
|
||||||
Poco::File force_restore_data_flag_file(context->getFlagsPath() + "force_restore_data");
|
auto force_restore_data_flag_file = fs::path(context->getFlagsPath()) / "force_restore_data";
|
||||||
bool has_force_restore_data_flag = force_restore_data_flag_file.exists();
|
bool has_force_restore_data_flag = fs::exists(force_restore_data_flag_file);
|
||||||
|
|
||||||
/// Loop over databases.
|
/// Loop over databases.
|
||||||
std::map<String, String> databases;
|
std::map<String, String> databases;
|
||||||
Poco::DirectoryIterator dir_end;
|
fs::directory_iterator dir_end;
|
||||||
for (Poco::DirectoryIterator it(path); it != dir_end; ++it)
|
for (fs::directory_iterator it(path); it != dir_end; ++it)
|
||||||
{
|
{
|
||||||
if (it->isLink())
|
if (it->is_symlink())
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!it->isDirectory())
|
const auto current_file = it->path().filename().string();
|
||||||
|
if (!it->is_directory())
|
||||||
{
|
{
|
||||||
/// TODO: DETACH DATABASE PERMANENTLY ?
|
/// TODO: DETACH DATABASE PERMANENTLY ?
|
||||||
if (endsWith(it.name(), ".sql"))
|
if (fs::path(current_file).extension() == ".sql")
|
||||||
{
|
{
|
||||||
String db_name = it.name().substr(0, it.name().size() - 4);
|
String db_name = fs::path(current_file).stem();
|
||||||
if (db_name != DatabaseCatalog::SYSTEM_DATABASE)
|
if (db_name != DatabaseCatalog::SYSTEM_DATABASE)
|
||||||
databases.emplace(unescapeForFileName(db_name), path + "/" + db_name);
|
databases.emplace(unescapeForFileName(db_name), fs::path(path) / db_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Temporary fails may be left from previous server runs.
|
/// Temporary fails may be left from previous server runs.
|
||||||
if (endsWith(it.name(), ".tmp"))
|
if (fs::path(current_file).extension() == ".tmp")
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, "Removing temporary file {}", it->path());
|
LOG_WARNING(log, "Removing temporary file {}", it->path().string());
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
it->remove();
|
fs::remove(it->path());
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -135,13 +138,13 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// For '.svn', '.gitignore' directory and similar.
|
/// For '.svn', '.gitignore' directory and similar.
|
||||||
if (it.name().at(0) == '.')
|
if (current_file.at(0) == '.')
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (it.name() == DatabaseCatalog::SYSTEM_DATABASE)
|
if (current_file == DatabaseCatalog::SYSTEM_DATABASE)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
databases.emplace(unescapeForFileName(it.name()), it.path().toString());
|
databases.emplace(unescapeForFileName(current_file), it->path().string());
|
||||||
}
|
}
|
||||||
|
|
||||||
/// clickhouse-local creates DatabaseMemory as default database by itself
|
/// clickhouse-local creates DatabaseMemory as default database by itself
|
||||||
@ -158,7 +161,7 @@ void loadMetadata(ContextMutablePtr context, const String & default_database_nam
|
|||||||
{
|
{
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
force_restore_data_flag_file.remove();
|
fs::remove(force_restore_data_flag_file);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -172,7 +175,7 @@ void loadMetadataSystem(ContextMutablePtr context)
|
|||||||
{
|
{
|
||||||
String path = context->getPath() + "metadata/" + DatabaseCatalog::SYSTEM_DATABASE;
|
String path = context->getPath() + "metadata/" + DatabaseCatalog::SYSTEM_DATABASE;
|
||||||
String metadata_file = path + ".sql";
|
String metadata_file = path + ".sql";
|
||||||
if (Poco::File(path).exists() || Poco::File(metadata_file).exists())
|
if (fs::exists(fs::path(path)) || fs::exists(fs::path(metadata_file)))
|
||||||
{
|
{
|
||||||
/// 'has_force_restore_data_flag' is true, to not fail on loading query_log table, if it is corrupted.
|
/// 'has_force_restore_data_flag' is true, to not fail on loading query_log table, if it is corrupted.
|
||||||
loadDatabase(context, DatabaseCatalog::SYSTEM_DATABASE, path, true);
|
loadDatabase(context, DatabaseCatalog::SYSTEM_DATABASE, path, true);
|
||||||
|
@ -1,11 +1,8 @@
|
|||||||
#include <Server/HTTP/HTTPServerResponse.h>
|
#include <Server/HTTP/HTTPServerResponse.h>
|
||||||
|
|
||||||
#include <Server/HTTP/HTTPServerRequest.h>
|
#include <Server/HTTP/HTTPServerRequest.h>
|
||||||
|
|
||||||
#include <Poco/CountingStream.h>
|
#include <Poco/CountingStream.h>
|
||||||
#include <Poco/DateTimeFormat.h>
|
#include <Poco/DateTimeFormat.h>
|
||||||
#include <Poco/DateTimeFormatter.h>
|
#include <Poco/DateTimeFormatter.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/FileStream.h>
|
#include <Poco/FileStream.h>
|
||||||
#include <Poco/Net/HTTPChunkedStream.h>
|
#include <Poco/Net/HTTPChunkedStream.h>
|
||||||
#include <Poco/Net/HTTPFixedLengthStream.h>
|
#include <Poco/Net/HTTPFixedLengthStream.h>
|
||||||
@ -13,6 +10,7 @@
|
|||||||
#include <Poco/Net/HTTPStream.h>
|
#include <Poco/Net/HTTPStream.h>
|
||||||
#include <Poco/StreamCopier.h>
|
#include <Poco/StreamCopier.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
@ -39,7 +39,6 @@
|
|||||||
|
|
||||||
#include <Poco/Base64Decoder.h>
|
#include <Poco/Base64Decoder.h>
|
||||||
#include <Poco/Base64Encoder.h>
|
#include <Poco/Base64Encoder.h>
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Net/HTTPBasicCredentials.h>
|
#include <Poco/Net/HTTPBasicCredentials.h>
|
||||||
#include <Poco/Net/HTTPStream.h>
|
#include <Poco/Net/HTTPStream.h>
|
||||||
#include <Poco/Net/NetException.h>
|
#include <Poco/Net/NetException.h>
|
||||||
|
@ -14,14 +14,15 @@
|
|||||||
|
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
#include <Poco/Path.h>
|
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Net/HTTPServerRequest.h>
|
#include <Poco/Net/HTTPServerRequest.h>
|
||||||
#include <Poco/Net/HTTPServerResponse.h>
|
#include <Poco/Net/HTTPServerResponse.h>
|
||||||
#include <Poco/Net/HTTPRequestHandlerFactory.h>
|
#include <Poco/Net/HTTPRequestHandlerFactory.h>
|
||||||
#include <Poco/Util/LayeredConfiguration.h>
|
#include <Poco/Util/LayeredConfiguration.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -137,11 +138,14 @@ void StaticRequestHandler::writeResponse(WriteBuffer & out)
|
|||||||
|
|
||||||
if (startsWith(response_expression, file_prefix))
|
if (startsWith(response_expression, file_prefix))
|
||||||
{
|
{
|
||||||
const auto & user_files_absolute_path = Poco::Path(server.context()->getUserFilesPath()).makeAbsolute().makeDirectory().toString();
|
auto file_name = response_expression.substr(file_prefix.size(), response_expression.size() - file_prefix.size());
|
||||||
const auto & file_name = response_expression.substr(file_prefix.size(), response_expression.size() - file_prefix.size());
|
if (file_name.starts_with('/'))
|
||||||
|
file_name = file_name.substr(1);
|
||||||
|
|
||||||
const auto & file_path = Poco::Path(user_files_absolute_path, file_name).makeAbsolute().toString();
|
fs::path user_files_absolute_path = fs::canonical(fs::path(server.context()->getUserFilesPath()));
|
||||||
if (!Poco::File(file_path).exists())
|
String file_path = fs::weakly_canonical(user_files_absolute_path / file_name);
|
||||||
|
|
||||||
|
if (!fs::exists(file_path))
|
||||||
throw Exception("Invalid file name " + file_path + " for static HTTPHandler. ", ErrorCodes::INCORRECT_FILE_NAME);
|
throw Exception("Invalid file name " + file_path + " for static HTTPHandler. ", ErrorCodes::INCORRECT_FILE_NAME);
|
||||||
|
|
||||||
ReadBufferFromFile in(file_path);
|
ReadBufferFromFile in(file_path);
|
||||||
|
@ -25,11 +25,9 @@
|
|||||||
#include <IO/ConnectionTimeoutsContext.h>
|
#include <IO/ConnectionTimeoutsContext.h>
|
||||||
#include <IO/Operators.h>
|
#include <IO/Operators.h>
|
||||||
#include <Disks/IDisk.h>
|
#include <Disks/IDisk.h>
|
||||||
|
|
||||||
#include <boost/algorithm/string/find_iterator.hpp>
|
#include <boost/algorithm/string/find_iterator.hpp>
|
||||||
#include <boost/algorithm/string/finder.hpp>
|
#include <boost/algorithm/string/finder.hpp>
|
||||||
|
#include <filesystem>
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
@ -39,6 +37,8 @@ namespace CurrentMetrics
|
|||||||
extern const Metric BrokenDistributedFilesToInsert;
|
extern const Metric BrokenDistributedFilesToInsert;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@@ -293,7 +293,7 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor(
 , pool(std::move(pool_))
 , disk(disk_)
 , relative_path(relative_path_)
-, path(disk->getPath() + relative_path + '/')
+, path(fs::path(disk->getPath()) / relative_path / "")
 , should_batch_inserts(storage.getContext()->getSettingsRef().distributed_directory_monitor_batch_inserts)
 , dir_fsync(storage.getDistributedSettingsRef().fsync_directories)
 , min_batched_block_size_rows(storage.getContext()->getSettingsRef().min_insert_block_size_rows)
@@ -347,7 +347,7 @@ void StorageDistributedDirectoryMonitor::shutdownAndDropAllData()
 }

 auto dir_sync_guard = getDirectorySyncGuard(dir_fsync, disk, relative_path);
-Poco::File(path).remove(true);
+fs::remove_all(path);
 }

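The trailing / "" in the new path expression is what keeps the trailing directory separator that the old + '/' concatenation produced: joining an empty component onto an fs::path that ends in a filename appends the preferred separator. A small standalone illustration (POSIX separators assumed; the paths are only examples):

    #include <cassert>
    #include <filesystem>

    namespace fs = std::filesystem;

    int main()
    {
        /// operator/ inserts a separator between components...
        fs::path p = fs::path("/var/lib/clickhouse") / "data" / "shard1";
        assert(p.string() == "/var/lib/clickhouse/data/shard1");

        /// ...and joining an empty component appends one more separator,
        /// mirroring the old `relative_path + '/'` string concatenation.
        fs::path dir = p / "";
        assert(dir.string() == "/var/lib/clickhouse/data/shard1/");
    }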
@ -490,16 +490,14 @@ std::map<UInt64, std::string> StorageDistributedDirectoryMonitor::getFiles()
|
|||||||
std::map<UInt64, std::string> files;
|
std::map<UInt64, std::string> files;
|
||||||
size_t new_bytes_count = 0;
|
size_t new_bytes_count = 0;
|
||||||
|
|
||||||
Poco::DirectoryIterator end;
|
fs::directory_iterator end;
|
||||||
for (Poco::DirectoryIterator it{path}; it != end; ++it)
|
for (fs::directory_iterator it{path}; it != end; ++it)
|
||||||
{
|
{
|
||||||
const auto & file_path_str = it->path();
|
const auto & file_path_str = it->path();
|
||||||
Poco::Path file_path{file_path_str};
|
if (!it->is_directory() && startsWith(fs::path(file_path_str).extension(), ".bin"))
|
||||||
|
|
||||||
if (!it->isDirectory() && startsWith(file_path.getExtension(), "bin"))
|
|
||||||
{
|
{
|
||||||
files[parse<UInt64>(file_path.getBaseName())] = file_path_str;
|
files[parse<UInt64>(fs::path(file_path_str).stem())] = file_path_str;
|
||||||
new_bytes_count += Poco::File(file_path).getSize();
|
new_bytes_count += fs::file_size(fs::path(file_path_str));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -663,8 +661,7 @@ struct StorageDistributedDirectoryMonitor::Batch
|
|||||||
String tmp_file{parent.current_batch_file_path + ".tmp"};
|
String tmp_file{parent.current_batch_file_path + ".tmp"};
|
||||||
|
|
||||||
auto dir_sync_guard = getDirectorySyncGuard(dir_fsync, parent.disk, parent.relative_path);
|
auto dir_sync_guard = getDirectorySyncGuard(dir_fsync, parent.disk, parent.relative_path);
|
||||||
|
if (fs::exists(tmp_file))
|
||||||
if (Poco::File{tmp_file}.exists())
|
|
||||||
LOG_ERROR(parent.log, "Temporary file {} exists. Unclean shutdown?", backQuote(tmp_file));
|
LOG_ERROR(parent.log, "Temporary file {} exists. Unclean shutdown?", backQuote(tmp_file));
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -676,7 +673,7 @@ struct StorageDistributedDirectoryMonitor::Batch
|
|||||||
out.sync();
|
out.sync();
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::File{tmp_file}.renameTo(parent.current_batch_file_path);
|
fs::rename(tmp_file, parent.current_batch_file_path);
|
||||||
}
|
}
|
||||||
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(parent.storage.getContext()->getSettingsRef());
|
auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(parent.storage.getContext()->getSettingsRef());
|
||||||
auto connection = parent.pool->get(timeouts);
|
auto connection = parent.pool->get(timeouts);
|
||||||
@ -757,7 +754,7 @@ struct StorageDistributedDirectoryMonitor::Batch
|
|||||||
total_bytes = 0;
|
total_bytes = 0;
|
||||||
recovered = false;
|
recovered = false;
|
||||||
|
|
||||||
Poco::File{parent.current_batch_file_path}.setSize(0);
|
fs::resize_file(parent.current_batch_file_path, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void writeText(WriteBuffer & out)
|
void writeText(WriteBuffer & out)
|
||||||
@ -850,7 +847,7 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map
|
|||||||
{
|
{
|
||||||
std::unordered_set<UInt64> file_indices_to_skip;
|
std::unordered_set<UInt64> file_indices_to_skip;
|
||||||
|
|
||||||
if (Poco::File{current_batch_file_path}.exists())
|
if (fs::exists(current_batch_file_path))
|
||||||
{
|
{
|
||||||
/// Possibly, we failed to send a batch on the previous iteration. Try to send exactly the same batch.
|
/// Possibly, we failed to send a batch on the previous iteration. Try to send exactly the same batch.
|
||||||
Batch batch(*this, files);
|
Batch batch(*this, files);
|
||||||
@ -951,8 +948,8 @@ void StorageDistributedDirectoryMonitor::processFilesWithBatching(const std::map
|
|||||||
|
|
||||||
/// current_batch.txt will not exist if there was no send
|
/// current_batch.txt will not exist if there was no send
|
||||||
/// (this is the case when all batches that was pending has been marked as pending)
|
/// (this is the case when all batches that was pending has been marked as pending)
|
||||||
if (Poco::File{current_batch_file_path}.exists())
|
if (fs::exists(current_batch_file_path))
|
||||||
Poco::File{current_batch_file_path}.remove();
|
fs::remove(current_batch_file_path);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -961,20 +958,18 @@ void StorageDistributedDirectoryMonitor::markAsBroken(const std::string & file_p
|
|||||||
const auto last_path_separator_pos = file_path.rfind('/');
|
const auto last_path_separator_pos = file_path.rfind('/');
|
||||||
const auto & base_path = file_path.substr(0, last_path_separator_pos + 1);
|
const auto & base_path = file_path.substr(0, last_path_separator_pos + 1);
|
||||||
const auto & file_name = file_path.substr(last_path_separator_pos + 1);
|
const auto & file_name = file_path.substr(last_path_separator_pos + 1);
|
||||||
const auto & broken_path = base_path + "broken/";
|
const String & broken_path = fs::path(base_path) / "broken/";
|
||||||
const auto & broken_file_path = broken_path + file_name;
|
const String & broken_file_path = fs::path(broken_path) / file_name;
|
||||||
|
|
||||||
Poco::File{broken_path}.createDirectory();
|
fs::create_directory(broken_path);
|
||||||
|
|
||||||
auto dir_sync_guard = getDirectorySyncGuard(dir_fsync, disk, relative_path);
|
auto dir_sync_guard = getDirectorySyncGuard(dir_fsync, disk, relative_path);
|
||||||
auto broken_dir_sync_guard = getDirectorySyncGuard(dir_fsync, disk, relative_path + "/broken/");
|
auto broken_dir_sync_guard = getDirectorySyncGuard(dir_fsync, disk, fs::path(relative_path) / "broken/");
|
||||||
|
|
||||||
Poco::File file(file_path);
|
|
||||||
|
|
||||||
{
|
{
|
||||||
std::lock_guard status_lock(status_mutex);
|
std::lock_guard status_lock(status_mutex);
|
||||||
|
|
||||||
size_t file_size = file.getSize();
|
size_t file_size = fs::file_size(file_path);
|
||||||
|
|
||||||
--status.files_count;
|
--status.files_count;
|
||||||
status.bytes_count -= file_size;
|
status.bytes_count -= file_size;
|
||||||
@ -985,15 +980,13 @@ void StorageDistributedDirectoryMonitor::markAsBroken(const std::string & file_p
|
|||||||
metric_broken_files.add();
|
metric_broken_files.add();
|
||||||
}
|
}
|
||||||
|
|
||||||
file.renameTo(broken_file_path);
|
fs::rename(file_path, broken_file_path);
|
||||||
|
|
||||||
LOG_ERROR(log, "Renamed `{}` to `{}`", file_path, broken_file_path);
|
LOG_ERROR(log, "Renamed `{}` to `{}`", file_path, broken_file_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void StorageDistributedDirectoryMonitor::markAsSend(const std::string & file_path)
|
void StorageDistributedDirectoryMonitor::markAsSend(const std::string & file_path)
|
||||||
{
|
{
|
||||||
Poco::File file(file_path);
|
size_t file_size = fs::file_size(file_path);
|
||||||
|
|
||||||
size_t file_size = file.getSize();
|
|
||||||
|
|
||||||
{
|
{
|
||||||
std::lock_guard status_lock(status_mutex);
|
std::lock_guard status_lock(status_mutex);
|
||||||
@ -1002,7 +995,7 @@ void StorageDistributedDirectoryMonitor::markAsSend(const std::string & file_pat
|
|||||||
status.bytes_count -= file_size;
|
status.bytes_count -= file_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
file.remove();
|
fs::remove(file_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool StorageDistributedDirectoryMonitor::maybeMarkAsBroken(const std::string & file_path, const Exception & e)
|
bool StorageDistributedDirectoryMonitor::maybeMarkAsBroken(const std::string & file_path, const Exception & e)
|
||||||
@ -1030,7 +1023,7 @@ void StorageDistributedDirectoryMonitor::updatePath(const std::string & new_rela
|
|||||||
{
|
{
|
||||||
std::lock_guard status_lock(status_mutex);
|
std::lock_guard status_lock(status_mutex);
|
||||||
relative_path = new_relative_path;
|
relative_path = new_relative_path;
|
||||||
path = disk->getPath() + relative_path + '/';
|
path = fs::path(disk->getPath()) / relative_path / "";
|
||||||
}
|
}
|
||||||
current_batch_file_path = path + "current_batch.txt";
|
current_batch_file_path = path + "current_batch.txt";
|
||||||
|
|
||||||
|
@ -33,11 +33,10 @@
|
|||||||
#include <ext/range.h>
|
#include <ext/range.h>
|
||||||
#include <ext/scope_guard.h>
|
#include <ext/scope_guard.h>
|
||||||
|
|
||||||
#include <Poco/DirectoryIterator.h>
|
|
||||||
|
|
||||||
#include <future>
|
#include <future>
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
@ -50,10 +49,11 @@ namespace ProfileEvents
|
|||||||
extern const Event DistributedSyncInsertionTimeoutExceeded;
|
extern const Event DistributedSyncInsertionTimeoutExceeded;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
|
||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int LOGICAL_ERROR;
|
extern const int LOGICAL_ERROR;
|
||||||
@ -660,10 +660,10 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std::
|
|||||||
/// hardlinking to ensure the inode is not freed until we're done
|
/// hardlinking to ensure the inode is not freed until we're done
|
||||||
{
|
{
|
||||||
const std::string path(disk_path + data_path + *it);
|
const std::string path(disk_path + data_path + *it);
|
||||||
Poco::File(path).createDirectory();
|
|
||||||
|
|
||||||
const std::string tmp_path(path + "/tmp/");
|
const std::string tmp_path(path + "/tmp/");
|
||||||
Poco::File(tmp_path).createDirectory();
|
|
||||||
|
fs::create_directory(path);
|
||||||
|
fs::create_directory(tmp_path);
|
||||||
|
|
||||||
const std::string file_name(toString(storage.file_names_increment.get()) + ".bin");
|
const std::string file_name(toString(storage.file_names_increment.get()) + ".bin");
|
||||||
|
|
||||||
@ -717,7 +717,7 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std::
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create hardlink here to reuse increment number
|
// Create hardlink here to reuse increment number
|
||||||
const std::string block_file_path(path + '/' + file_name);
|
const std::string block_file_path(fs::path(path) / file_name);
|
||||||
createHardLink(first_file_tmp_path, block_file_path);
|
createHardLink(first_file_tmp_path, block_file_path);
|
||||||
auto dir_sync_guard = make_directory_sync_guard(*it);
|
auto dir_sync_guard = make_directory_sync_guard(*it);
|
||||||
}
|
}
|
||||||
@ -726,18 +726,18 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std::
|
|||||||
/// Make hardlinks
|
/// Make hardlinks
|
||||||
for (; it != dir_names.end(); ++it)
|
for (; it != dir_names.end(); ++it)
|
||||||
{
|
{
|
||||||
const std::string path(disk_path + data_path + *it);
|
const std::string path(fs::path(disk_path) / (data_path + *it));
|
||||||
Poco::File(path).createDirectory();
|
fs::create_directory(path);
|
||||||
|
|
||||||
const std::string block_file_path(path + '/' + toString(storage.file_names_increment.get()) + ".bin");
|
const std::string block_file_path(fs::path(path) / (toString(storage.file_names_increment.get()) + ".bin"));
|
||||||
createHardLink(first_file_tmp_path, block_file_path);
|
createHardLink(first_file_tmp_path, block_file_path);
|
||||||
auto dir_sync_guard = make_directory_sync_guard(*it);
|
auto dir_sync_guard = make_directory_sync_guard(*it);
|
||||||
}
|
}
|
||||||
|
|
||||||
auto file_size = Poco::File(first_file_tmp_path).getSize();
|
auto file_size = fs::file_size(first_file_tmp_path);
|
||||||
/// remove the temporary file, enabling the OS to reclaim inode after all threads
|
/// remove the temporary file, enabling the OS to reclaim inode after all threads
|
||||||
/// have removed their corresponding files
|
/// have removed their corresponding files
|
||||||
Poco::File(first_file_tmp_path).remove();
|
fs::remove(first_file_tmp_path);
|
||||||
|
|
||||||
/// Notify
|
/// Notify
|
||||||
auto sleep_ms = context->getSettingsRef().distributed_directory_monitor_sleep_time_ms;
|
auto sleep_ms = context->getSettingsRef().distributed_directory_monitor_sleep_time_ms;
|
||||||
|
@ -18,7 +18,6 @@
|
|||||||
#include <DataStreams/IBlockOutputStream.h>
|
#include <DataStreams/IBlockOutputStream.h>
|
||||||
#include <DataStreams/OwningBlockInputStream.h>
|
#include <DataStreams/OwningBlockInputStream.h>
|
||||||
#include <DataStreams/IBlockInputStream.h>
|
#include <DataStreams/IBlockInputStream.h>
|
||||||
|
|
||||||
#include <Common/parseGlobs.h>
|
#include <Common/parseGlobs.h>
|
||||||
#include <Poco/URI.h>
|
#include <Poco/URI.h>
|
||||||
#include <re2/re2.h>
|
#include <re2/re2.h>
|
||||||
@ -26,7 +25,9 @@
|
|||||||
#include <hdfs/hdfs.h>
|
#include <hdfs/hdfs.h>
|
||||||
#include <Processors/Sources/SourceWithProgress.h>
|
#include <Processors/Sources/SourceWithProgress.h>
|
||||||
#include <Processors/Pipe.h>
|
#include <Processors/Pipe.h>
|
||||||
|
#include <filesystem>
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -257,7 +258,7 @@ Strings LSWithRegexpMatching(const String & path_for_ls, const HDFSFSPtr & fs, c
|
|||||||
{
|
{
|
||||||
if (re2::RE2::FullMatch(file_name, matcher))
|
if (re2::RE2::FullMatch(file_name, matcher))
|
||||||
{
|
{
|
||||||
Strings result_part = LSWithRegexpMatching(full_path + "/", fs, suffix_with_globs.substr(next_slash));
|
Strings result_part = LSWithRegexpMatching(fs::path(full_path) / "", fs, suffix_with_globs.substr(next_slash));
|
||||||
/// Recursion depth is limited by pattern. '*' works only for depth = 1, for depth = 2 pattern path is '*/*'. So we do not need additional check.
|
/// Recursion depth is limited by pattern. '*' works only for depth = 1, for depth = 2 pattern path is '*/*'. So we do not need additional check.
|
||||||
std::move(result_part.begin(), result_part.end(), std::back_inserter(result));
|
std::move(result_part.begin(), result_part.end(), std::back_inserter(result));
|
||||||
}
|
}
|
||||||
|
@ -13,11 +13,11 @@
|
|||||||
#include <Common/NetException.h>
|
#include <Common/NetException.h>
|
||||||
#include <IO/createReadBufferFromFileBase.h>
|
#include <IO/createReadBufferFromFileBase.h>
|
||||||
#include <ext/scope_guard.h>
|
#include <ext/scope_guard.h>
|
||||||
|
|
||||||
#include <Poco/File.h>
|
|
||||||
#include <Poco/Net/HTTPRequest.h>
|
#include <Poco/Net/HTTPRequest.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
{
|
{
|
||||||
extern const Metric ReplicatedSend;
|
extern const Metric ReplicatedSend;
|
||||||
@ -289,7 +289,7 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
|
|||||||
{
|
{
|
||||||
String file_name = it.first;
|
String file_name = it.first;
|
||||||
|
|
||||||
String path = part->getFullRelativePath() + file_name;
|
String path = fs::path(part->getFullRelativePath()) / file_name;
|
||||||
|
|
||||||
UInt64 size = disk->getFileSize(path);
|
UInt64 size = disk->getFileSize(path);
|
||||||
|
|
||||||
@@ -339,15 +339,15 @@ void Service::sendPartS3Metadata(const MergeTreeData::DataPartPtr & part, WriteB
 {
 String file_name = it.first;

-String metadata_file = disk->getPath() + part->getFullRelativePath() + file_name;
+String metadata_file = fs::path(disk->getPath()) / part->getFullRelativePath() / file_name;

-Poco::File metadata(metadata_file);
+fs::path metadata(metadata_file);

-if (!metadata.exists())
+if (!fs::exists(metadata))
 throw Exception("S3 metadata '" + file_name + "' is not exists", ErrorCodes::CORRUPTED_DATA);
-if (!metadata.isFile())
+if (!fs::is_regular_file(metadata))
 throw Exception("S3 metadata '" + file_name + "' is not a file", ErrorCodes::CORRUPTED_DATA);
-UInt64 file_size = metadata.getSize();
+UInt64 file_size = fs::file_size(metadata);

 writeStringBinary(it.first, out);
 writeBinary(file_size, out);
@ -570,7 +570,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart(
|
|||||||
readUUIDText(part_uuid, in);
|
readUUIDText(part_uuid, in);
|
||||||
|
|
||||||
auto storage_id = data.getStorageID();
|
auto storage_id = data.getStorageID();
|
||||||
String new_part_path = part_type == "InMemory" ? "memory" : data.getFullPathOnDisk(reservation->getDisk()) + part_name + "/";
|
String new_part_path = part_type == "InMemory" ? "memory" : fs::path(data.getFullPathOnDisk(reservation->getDisk())) / part_name / "";
|
||||||
auto entry = data.getContext()->getReplicatedFetchList().insert(
|
auto entry = data.getContext()->getReplicatedFetchList().insert(
|
||||||
storage_id.getDatabaseName(), storage_id.getTableName(),
|
storage_id.getDatabaseName(), storage_id.getTableName(),
|
||||||
part_info.partition_id, part_name, new_part_path,
|
part_info.partition_id, part_name, new_part_path,
|
||||||
@@ -681,13 +681,13 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(

 /// File must be inside "absolute_part_path" directory.
 /// Otherwise malicious ClickHouse replica may force us to write to arbitrary path.
-String absolute_file_path = Poco::Path(part_download_path + file_name).absolute().toString();
-if (!startsWith(absolute_file_path, Poco::Path(part_download_path).absolute().toString()))
+String absolute_file_path = fs::weakly_canonical(fs::path(part_download_path) / file_name);
+if (!startsWith(absolute_file_path, fs::weakly_canonical(part_download_path).string()))
 throw Exception("File path (" + absolute_file_path + ") doesn't appear to be inside part path (" + part_download_path + ")."
 " This may happen if we are trying to download part from malicious replica or logical error.",
 ErrorCodes::INSECURE_PATH);

-auto file_out = disk->writeFile(part_download_path + file_name);
+auto file_out = disk->writeFile(fs::path(part_download_path) / file_name);
 HashingWriteBuffer hashing_out(*file_out);
 copyData(in, hashing_out, file_size, blocker.getCounter());

@ -704,7 +704,7 @@ void Fetcher::downloadBaseOrProjectionPartToDisk(
|
|||||||
readPODBinary(expected_hash, in);
|
readPODBinary(expected_hash, in);
|
||||||
|
|
||||||
if (expected_hash != hashing_out.getHash())
|
if (expected_hash != hashing_out.getHash())
|
||||||
throw Exception("Checksum mismatch for file " + fullPath(disk, part_download_path + file_name) + " transferred from " + replica_path,
|
throw Exception("Checksum mismatch for file " + fullPath(disk, (fs::path(part_download_path) / file_name).string()) + " transferred from " + replica_path,
|
||||||
ErrorCodes::CHECKSUM_DOESNT_MATCH);
|
ErrorCodes::CHECKSUM_DOESNT_MATCH);
|
||||||
|
|
||||||
if (file_name != "checksums.txt" &&
|
if (file_name != "checksums.txt" &&
|
||||||
@ -811,7 +811,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3(
|
|||||||
String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_;
|
String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_;
|
||||||
|
|
||||||
String part_relative_path = String(to_detached ? "detached/" : "") + tmp_prefix + part_name;
|
String part_relative_path = String(to_detached ? "detached/" : "") + tmp_prefix + part_name;
|
||||||
String part_download_path = data.getRelativeDataPath() + part_relative_path + "/";
|
String part_download_path = fs::path(data.getRelativeDataPath()) / part_relative_path / "";
|
||||||
|
|
||||||
if (disk->exists(part_download_path))
|
if (disk->exists(part_download_path))
|
||||||
throw Exception("Directory " + fullPath(disk, part_download_path) + " already exists.", ErrorCodes::DIRECTORY_ALREADY_EXISTS);
|
throw Exception("Directory " + fullPath(disk, part_download_path) + " already exists.", ErrorCodes::DIRECTORY_ALREADY_EXISTS);
|
||||||
@ -833,7 +833,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToS3(
|
|||||||
readStringBinary(file_name, in);
|
readStringBinary(file_name, in);
|
||||||
readBinary(file_size, in);
|
readBinary(file_size, in);
|
||||||
|
|
||||||
String data_path = part_download_path + file_name;
|
String data_path = fs::path(part_download_path) / file_name;
|
||||||
String metadata_file = fullPath(disk, data_path);
|
String metadata_file = fullPath(disk, data_path);
|
||||||
|
|
||||||
{
|
{
|
||||||
|
@ -69,7 +69,7 @@ void IMergeTreeDataPart::MinMaxIndex::load(const MergeTreeData & data, const Dis
|
|||||||
hyperrectangle.reserve(minmax_idx_size);
|
hyperrectangle.reserve(minmax_idx_size);
|
||||||
for (size_t i = 0; i < minmax_idx_size; ++i)
|
for (size_t i = 0; i < minmax_idx_size; ++i)
|
||||||
{
|
{
|
||||||
String file_name = part_path + "minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx";
|
String file_name = fs::path(part_path) / ("minmax_" + escapeForFileName(minmax_column_names[i]) + ".idx");
|
||||||
auto file = openForReading(disk_, file_name);
|
auto file = openForReading(disk_, file_name);
|
||||||
auto serialization = minmax_column_types[i]->getDefaultSerialization();
|
auto serialization = minmax_column_types[i]->getDefaultSerialization();
|
||||||
|
|
||||||
@ -111,7 +111,7 @@ void IMergeTreeDataPart::MinMaxIndex::store(
|
|||||||
String file_name = "minmax_" + escapeForFileName(column_names[i]) + ".idx";
|
String file_name = "minmax_" + escapeForFileName(column_names[i]) + ".idx";
|
||||||
auto serialization = data_types.at(i)->getDefaultSerialization();
|
auto serialization = data_types.at(i)->getDefaultSerialization();
|
||||||
|
|
||||||
auto out = disk_->writeFile(part_path + file_name);
|
auto out = disk_->writeFile(fs::path(part_path) / file_name);
|
||||||
HashingWriteBuffer out_hashing(*out);
|
HashingWriteBuffer out_hashing(*out);
|
||||||
serialization->serializeBinary(hyperrectangle[i].left, out_hashing);
|
serialization->serializeBinary(hyperrectangle[i].left, out_hashing);
|
||||||
serialization->serializeBinary(hyperrectangle[i].right, out_hashing);
|
serialization->serializeBinary(hyperrectangle[i].right, out_hashing);
|
||||||
@ -560,7 +560,7 @@ String IMergeTreeDataPart::getFullPath() const
|
|||||||
if (relative_path.empty())
|
if (relative_path.empty())
|
||||||
throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
|
||||||
|
|
||||||
return storage.getFullPathOnDisk(volume->getDisk()) + (parent_part ? parent_part->relative_path + "/" : "") + relative_path + "/";
|
return fs::path(storage.getFullPathOnDisk(volume->getDisk())) / (parent_part ? parent_part->relative_path : "") / relative_path / "";
|
||||||
}
|
}
|
||||||
|
|
||||||
String IMergeTreeDataPart::getFullRelativePath() const
|
String IMergeTreeDataPart::getFullRelativePath() const
|
||||||
@ -568,7 +568,7 @@ String IMergeTreeDataPart::getFullRelativePath() const
|
|||||||
if (relative_path.empty())
|
if (relative_path.empty())
|
||||||
throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
|
||||||
|
|
||||||
return storage.relative_data_path + (parent_part ? parent_part->relative_path + "/" : "") + relative_path + "/";
|
return fs::path(storage.relative_data_path) / (parent_part ? parent_part->relative_path : "") / relative_path / "";
|
||||||
}
|
}
|
||||||
|
|
||||||
void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checksums, bool check_consistency)
|
void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checksums, bool check_consistency)
|
||||||
@ -643,7 +643,7 @@ void IMergeTreeDataPart::loadIndex()
|
|||||||
loaded_index[i]->reserve(index_granularity.getMarksCount());
|
loaded_index[i]->reserve(index_granularity.getMarksCount());
|
||||||
}
|
}
|
||||||
|
|
||||||
String index_path = getFullRelativePath() + "primary.idx";
|
String index_path = fs::path(getFullRelativePath()) / "primary.idx";
|
||||||
auto index_file = openForReading(volume->getDisk(), index_path);
|
auto index_file = openForReading(volume->getDisk(), index_path);
|
||||||
|
|
||||||
size_t marks_count = index_granularity.getMarksCount();
|
size_t marks_count = index_granularity.getMarksCount();
|
||||||
@ -678,7 +678,7 @@ NameSet IMergeTreeDataPart::getFileNamesWithoutChecksums() const
|
|||||||
return {};
|
return {};
|
||||||
|
|
||||||
NameSet result = {"checksums.txt", "columns.txt"};
|
NameSet result = {"checksums.txt", "columns.txt"};
|
||||||
String default_codec_path = getFullRelativePath() + DEFAULT_COMPRESSION_CODEC_FILE_NAME;
|
String default_codec_path = fs::path(getFullRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME;
|
||||||
|
|
||||||
if (volume->getDisk()->exists(default_codec_path))
|
if (volume->getDisk()->exists(default_codec_path))
|
||||||
result.emplace(DEFAULT_COMPRESSION_CODEC_FILE_NAME);
|
result.emplace(DEFAULT_COMPRESSION_CODEC_FILE_NAME);
|
||||||
@ -695,7 +695,7 @@ void IMergeTreeDataPart::loadDefaultCompressionCodec()
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
String path = getFullRelativePath() + DEFAULT_COMPRESSION_CODEC_FILE_NAME;
|
String path = fs::path(getFullRelativePath()) / DEFAULT_COMPRESSION_CODEC_FILE_NAME;
|
||||||
if (!volume->getDisk()->exists(path))
|
if (!volume->getDisk()->exists(path))
|
 {
 default_codec = detectDefaultCompressionCodec();
@@ -756,7 +756,7 @@ CompressionCodecPtr IMergeTreeDataPart::detectDefaultCompressionCodec() const
 {
 if (path_to_data_file.empty())
 {
-String candidate_path = getFullRelativePath() + ISerialization::getFileNameForStream(part_column, substream_path) + ".bin";
+String candidate_path = fs::path(getFullRelativePath()) / (ISerialization::getFileNameForStream(part_column, substream_path) + ".bin");

 /// We can have existing, but empty .bin files. Example: LowCardinality(Nullable(...)) columns and column_name.dict.null.bin file.
 if (volume->getDisk()->exists(candidate_path) && volume->getDisk()->getFileSize(candidate_path) != 0)
@@ -822,7 +822,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex()

 void IMergeTreeDataPart::loadChecksums(bool require)
 {
-const String path = getFullRelativePath() + "checksums.txt";
+const String path = fs::path(getFullRelativePath()) / "checksums.txt";

 if (volume->getDisk()->exists(path))
 {
@@ -847,11 +847,11 @@ void IMergeTreeDataPart::loadChecksums(bool require)
 checksums = checkDataPart(shared_from_this(), false);

 {
-auto out = volume->getDisk()->writeFile(getFullRelativePath() + "checksums.txt.tmp", 4096);
+auto out = volume->getDisk()->writeFile(fs::path(getFullRelativePath()) / "checksums.txt.tmp", 4096);
 checksums.write(*out);
 }

-volume->getDisk()->moveFile(getFullRelativePath() + "checksums.txt.tmp", getFullRelativePath() + "checksums.txt");
+volume->getDisk()->moveFile(fs::path(getFullRelativePath()) / "checksums.txt.tmp", fs::path(getFullRelativePath()) / "checksums.txt");

 bytes_on_disk = checksums.getTotalSizeOnDisk();
 }
@@ -859,7 +859,7 @@ void IMergeTreeDataPart::loadChecksums(bool require)

 void IMergeTreeDataPart::loadRowsCount()
 {
-String path = getFullRelativePath() + "count.txt";
+String path = fs::path(getFullRelativePath()) / "count.txt";
 if (index_granularity.empty())
 {
 rows_count = 0;
@@ -960,7 +960,7 @@ void IMergeTreeDataPart::loadRowsCount()

 void IMergeTreeDataPart::loadTTLInfos()
 {
-String path = getFullRelativePath() + "ttl.txt";
+String path = fs::path(getFullRelativePath()) / "ttl.txt";
 if (volume->getDisk()->exists(path))
 {
 auto in = openForReading(volume->getDisk(), path);
@@ -987,7 +987,7 @@ void IMergeTreeDataPart::loadTTLInfos()

 void IMergeTreeDataPart::loadUUID()
 {
-String path = getFullRelativePath() + UUID_FILE_NAME;
+String path = fs::path(getFullRelativePath()) / UUID_FILE_NAME;

 if (volume->getDisk()->exists(path))
 {
@@ -1000,7 +1000,7 @@ void IMergeTreeDataPart::loadUUID()

 void IMergeTreeDataPart::loadColumns(bool require)
 {
-String path = getFullRelativePath() + "columns.txt";
+String path = fs::path(getFullRelativePath()) / "columns.txt";
 auto metadata_snapshot = storage.getInMemoryMetadataPtr();
 if (parent_part)
 metadata_snapshot = metadata_snapshot->projections.get(name).metadata;
@@ -1015,7 +1015,7 @@ void IMergeTreeDataPart::loadColumns(bool require)

 /// If there is no file with a list of columns, write it down.
 for (const NameAndTypePair & column : metadata_snapshot->getColumns().getAllPhysical())
-if (volume->getDisk()->exists(getFullRelativePath() + getFileNameForColumn(column) + ".bin"))
+if (volume->getDisk()->exists(fs::path(getFullRelativePath()) / (getFileNameForColumn(column) + ".bin")))
 loaded_columns.push_back(column);

 if (columns.empty())
@@ -1053,7 +1053,7 @@ UInt64 IMergeTreeDataPart::calculateTotalSizeOnDisk(const DiskPtr & disk_, const
 disk_->listFiles(from, files);
 UInt64 res = 0;
 for (const auto & file : files)
-res += calculateTotalSizeOnDisk(disk_, from + "/" + file);
+res += calculateTotalSizeOnDisk(disk_, fs::path(from) / file);
 return res;
 }

@@ -1063,7 +1063,7 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
 assertOnDisk();

 String from = getFullRelativePath();
-String to = storage.relative_data_path + (parent_part ? parent_part->relative_path + "/" : "") + new_relative_path + "/";
+String to = fs::path(storage.relative_data_path) / (parent_part ? parent_part->relative_path : "") / new_relative_path / "";

 if (!volume->getDisk()->exists(from))
 throw Exception("Part directory " + fullPath(volume->getDisk(), from) + " doesn't exist. Most likely it is a logical error.", ErrorCodes::FILE_DOESNT_EXIST);
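
Note on the `fs::path(...) / ""` pattern used throughout these hunks: appending an empty component with std::filesystem's operator/ keeps the trailing separator that the old `+ "/"` (or `+ '/'`) concatenation produced. A minimal standalone sketch of that idiom, not part of this commit; the part path is hypothetical and POSIX is assumed:

    #include <cassert>
    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    int main()
    {
        std::string to = "store/data/delete_tmp_all_1_1_0";   // hypothetical relative part path

        // Appending an empty element keeps a trailing '/' on the result.
        std::string with_slash = fs::path(to) / "";
        assert(with_slash == "store/data/delete_tmp_all_1_1_0/");

        // Joining non-empty components inserts exactly one separator.
        std::string file = fs::path(to) / "checksums.txt";
        assert(file == "store/data/delete_tmp_all_1_1_0/checksums.txt");
        return 0;
    }
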
@@ -1124,8 +1124,8 @@ void IMergeTreeDataPart::remove(bool keep_s3) const
 * And a race condition can happen that will lead to "File not found" error here.
 */

-String from = storage.relative_data_path + relative_path;
-String to = storage.relative_data_path + "delete_tmp_" + name;
+fs::path from = fs::path(storage.relative_data_path) / relative_path;
+fs::path to = fs::path(storage.relative_data_path) / ("delete_tmp_" + name);
 // TODO directory delete_tmp_<name> is never removed if server crashes before returning from this function

 auto disk = volume->getDisk();
@@ -1134,7 +1134,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const
 LOG_WARNING(storage.log, "Directory {} (to which part must be renamed before removing) already exists. Most likely this is due to unclean restart. Removing it.", fullPath(disk, to));
 try
 {
-disk->removeSharedRecursive(to + "/", keep_s3);
+disk->removeSharedRecursive(fs::path(to) / "", keep_s3);
 }
 catch (...)
 {
@@ -1147,12 +1147,15 @@ void IMergeTreeDataPart::remove(bool keep_s3) const
 {
 disk->moveDirectory(from, to);
 }
-catch (const Poco::FileNotFoundException &)
+catch (const fs::filesystem_error & e)
+{
+if (e.code() == std::errc::no_such_file_or_directory)
 {
 LOG_ERROR(storage.log, "Directory {} (part to remove) doesn't exist or one of nested files has gone. Most likely this is due to manual removing. This should be discouraged. Ignoring.", fullPath(disk, to));

 return;
 }
+throw;
+}

 // Record existing projection directories so we don't remove them twice
 std::unordered_set<String> projection_directories;
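
The rewritten catch block above swaps Poco::FileNotFoundException for fs::filesystem_error plus an explicit error-code check; anything other than "no such file or directory" is now rethrown instead of being silently ignored. A standalone sketch of the same pattern, not from the commit and with made-up paths:

    #include <filesystem>
    #include <iostream>
    #include <system_error>

    namespace fs = std::filesystem;

    int main()
    {
        try
        {
            // Renaming a directory that does not exist throws fs::filesystem_error.
            fs::rename("missing_dir", "missing_dir_renamed");
        }
        catch (const fs::filesystem_error & e)
        {
            if (e.code() == std::errc::no_such_file_or_directory)
                std::cerr << "Already gone, ignoring: " << e.what() << '\n';   // tolerated, as in the diff
            else
                throw;                                                         // any other error still propagates
        }
        return 0;
    }
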
@@ -1166,7 +1169,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const
 if (checksums.empty())
 {
 /// If the part is not completely written, we cannot use fast path by listing files.
-disk->removeSharedRecursive(to + "/", keep_s3);
+disk->removeSharedRecursive(fs::path(to) / "", keep_s3);
 }
 else
 {
@@ -1181,17 +1184,17 @@ void IMergeTreeDataPart::remove(bool keep_s3) const
 for (const auto & [file, _] : checksums.files)
 {
 if (projection_directories.find(file) == projection_directories.end())
-disk->removeSharedFile(to + "/" + file, keep_s3);
+disk->removeSharedFile(fs::path(to) / file, keep_s3);
 }
 #if !defined(__clang__)
 # pragma GCC diagnostic pop
 #endif

 for (const auto & file : {"checksums.txt", "columns.txt"})
-disk->removeSharedFile(to + "/" + file, keep_s3);
+disk->removeSharedFile(fs::path(to) / file, keep_s3);

-disk->removeSharedFileIfExists(to + "/" + DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3);
-disk->removeSharedFileIfExists(to + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3);
+disk->removeSharedFileIfExists(fs::path(to) / DEFAULT_COMPRESSION_CODEC_FILE_NAME, keep_s3);
+disk->removeSharedFileIfExists(fs::path(to) / DELETE_ON_DESTROY_MARKER_FILE_NAME, keep_s3);

 disk->removeDirectory(to);
 }
@@ -1201,7 +1204,7 @@ void IMergeTreeDataPart::remove(bool keep_s3) const

 LOG_ERROR(storage.log, "Cannot quickly remove directory {} by removing files; fallback to recursive removal. Reason: {}", fullPath(disk, to), getCurrentExceptionMessage(false));

-disk->removeSharedRecursive(to + "/", keep_s3);
+disk->removeSharedRecursive(fs::path(to) / "", keep_s3);
 }
 }
 }
@@ -1268,7 +1271,7 @@ String IMergeTreeDataPart::getRelativePathForPrefix(const String & prefix) const
 {
 res = (prefix.empty() ? "" : prefix + "_") + name + (try_no ? "_try" + DB::toString(try_no) : "");

-if (!volume->getDisk()->exists(getFullRelativePath() + res))
+if (!volume->getDisk()->exists(fs::path(getFullRelativePath()) / res))
 return res;

 LOG_WARNING(storage.log, "Directory {} (to detach to) already exists. Will detach to directory with '_tryN' suffix.", res);
@@ -1291,11 +1294,11 @@ void IMergeTreeDataPart::renameToDetached(const String & prefix) const

 void IMergeTreeDataPart::makeCloneInDetached(const String & prefix, const StorageMetadataPtr & /*metadata_snapshot*/) const
 {
-String destination_path = storage.relative_data_path + getRelativePathForDetachedPart(prefix);
+String destination_path = fs::path(storage.relative_data_path) / getRelativePathForDetachedPart(prefix);

 /// Backup is not recursive (max_level is 0), so do not copy inner directories
 localBackup(volume->getDisk(), getFullRelativePath(), destination_path, 0);
-volume->getDisk()->removeFileIfExists(destination_path + "/" + DELETE_ON_DESTROY_MARKER_FILE_NAME);
+volume->getDisk()->removeFileIfExists(fs::path(destination_path) / DELETE_ON_DESTROY_MARKER_FILE_NAME);
 }

 void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const
@@ -1307,16 +1310,16 @@ void IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & di
 if (directory_name.empty())
 throw Exception("Can not clone data part " + name + " to empty directory.", ErrorCodes::LOGICAL_ERROR);

-String path_to_clone = storage.relative_data_path + directory_name + '/';
+String path_to_clone = fs::path(storage.relative_data_path) / directory_name / "";

-if (disk->exists(path_to_clone + relative_path))
+if (disk->exists(fs::path(path_to_clone) / relative_path))
 {
 LOG_WARNING(storage.log, "Path " + fullPath(disk, path_to_clone + relative_path) + " already exists. Will remove it and clone again.");
-disk->removeRecursive(path_to_clone + relative_path + '/');
+disk->removeRecursive(fs::path(path_to_clone) / relative_path / "");
 }
 disk->createDirectories(path_to_clone);
 volume->getDisk()->copy(getFullRelativePath(), disk, path_to_clone);
-volume->getDisk()->removeFileIfExists(path_to_clone + '/' + DELETE_ON_DESTROY_MARKER_FILE_NAME);
+volume->getDisk()->removeFileIfExists(fs::path(path_to_clone) / DELETE_ON_DESTROY_MARKER_FILE_NAME);
 }

 void IMergeTreeDataPart::checkConsistencyBase() const
@@ -1370,19 +1373,19 @@ void IMergeTreeDataPart::checkConsistencyBase() const

 /// Check that the primary key index is not empty.
 if (!pk.column_names.empty())
-check_file_not_empty(volume->getDisk(), path + "primary.idx");
+check_file_not_empty(volume->getDisk(), fs::path(path) / "primary.idx");

 if (storage.format_version >= MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING)
 {
-check_file_not_empty(volume->getDisk(), path + "count.txt");
+check_file_not_empty(volume->getDisk(), fs::path(path) / "count.txt");

 if (metadata_snapshot->hasPartitionKey())
-check_file_not_empty(volume->getDisk(), path + "partition.dat");
+check_file_not_empty(volume->getDisk(), fs::path(path) / "partition.dat");

 if (!parent_part)
 {
 for (const String & col_name : storage.getMinMaxColumnsNames(partition_key))
-check_file_not_empty(volume->getDisk(), path + "minmax_" + escapeForFileName(col_name) + ".idx");
+check_file_not_empty(volume->getDisk(), fs::path(path) / ("minmax_" + escapeForFileName(col_name) + ".idx"));
 }
 }
 }
@@ -1477,7 +1480,7 @@ String IMergeTreeDataPart::getUniqueId() const
 auto disk = volume->getDisk();

 if (disk->getType() == DB::DiskType::Type::S3)
-id = disk->getUniqueId(getFullRelativePath() + "checksums.txt");
+id = disk->getUniqueId(fs::path(getFullRelativePath()) / "checksums.txt");

 if (id.empty())
 throw Exception("Can't get unique S3 object", ErrorCodes::LOGICAL_ERROR);
@@ -17,8 +17,6 @@
 #include <Storages/MergeTree/MergeTreeIOSettings.h>
 #include <Storages/MergeTree/KeyCondition.h>

-#include <Poco/Path.h>
-
 #include <shared_mutex>

 namespace zkutil
@@ -6,7 +6,6 @@
 #include <Interpreters/inplaceBlockConversions.h>
 #include <Storages/MergeTree/IMergeTreeReader.h>
 #include <Common/typeid_cast.h>
-#include <Poco/File.h>


 namespace DB
@@ -83,7 +83,7 @@ private:
 void createNode()
 {
 shutdown_called = false;
-node = EphemeralNodeHolder::createSequential(path + "/leader_election-", zookeeper, identifier);
+node = EphemeralNodeHolder::createSequential(fs::path(path) / "leader_election-", zookeeper, identifier);

 std::string node_path = node->getPath();
 node_name = node_path.substr(node_path.find_last_of('/') + 1);
@@ -52,8 +52,6 @@
 #include <Common/quoteString.h>
 #include <Common/typeid_cast.h>

-#include <Poco/DirectoryIterator.h>
-
 #include <boost/range/adaptor/filtered.hpp>
 #include <boost/algorithm/string/join.hpp>

@@ -67,8 +65,11 @@
 #include <typeinfo>
 #include <typeindex>
 #include <unordered_set>
+#include <filesystem>


+namespace fs = std::filesystem;
+
 namespace ProfileEvents
 {
 extern const Event RejectedInserts;
@@ -210,8 +211,8 @@ MergeTreeData::MergeTreeData(
 for (const auto & [path, disk] : getRelativeDataPathsWithDisks())
 {
 disk->createDirectories(path);
-disk->createDirectories(path + MergeTreeData::DETACHED_DIR_NAME);
-auto current_version_file_path = path + MergeTreeData::FORMAT_VERSION_FILE_NAME;
+disk->createDirectories(fs::path(path) / MergeTreeData::DETACHED_DIR_NAME);
+String current_version_file_path = fs::path(path) / MergeTreeData::FORMAT_VERSION_FILE_NAME;
 if (disk->exists(current_version_file_path))
 {
 if (!version_file.first.empty())
@@ -225,7 +226,7 @@ MergeTreeData::MergeTreeData(

 /// If not choose any
 if (version_file.first.empty())
-version_file = {relative_data_path + MergeTreeData::FORMAT_VERSION_FILE_NAME, getStoragePolicy()->getAnyDisk()};
+version_file = {fs::path(relative_data_path) / MergeTreeData::FORMAT_VERSION_FILE_NAME, getStoragePolicy()->getAnyDisk()};

 bool version_file_exists = version_file.second->exists(version_file.first);

@@ -927,8 +928,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
 auto part = createPart(part_name, part_info, single_disk_volume, part_name);
 bool broken = false;

-String part_path = relative_data_path + "/" + part_name;
-String marker_path = part_path + "/" + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
+String part_path = fs::path(relative_data_path) / part_name;
+String marker_path = fs::path(part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
 if (part_disk_ptr->exists(marker_path))
 {
 LOG_WARNING(log, "Detaching stale part {}{}, which should have been deleted after a move. That can only happen after unclean restart of ClickHouse after move of a part having an operation blocking that stale copy of part.", getFullPathOnDisk(part_disk_ptr), part_name);
@@ -1016,7 +1017,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
 else
 has_adaptive_parts.store(true, std::memory_order_relaxed);

-part->modification_time = part_disk_ptr->getLastModified(relative_data_path + part_name).epochTime();
+part->modification_time = part_disk_ptr->getLastModified(fs::path(relative_data_path) / part_name).epochTime();
 /// Assume that all parts are Committed, covered parts will be detected and marked as Outdated later
 part->setState(DataPartState::Committed);

@@ -1160,10 +1161,15 @@ void MergeTreeData::clearOldTemporaryDirectories(ssize_t custom_directories_life
 disk->removeRecursive(it->path());
 }
 }
-catch (const Poco::FileNotFoundException &)
+catch (const fs::filesystem_error & e)
+{
+if (e.code() == std::errc::no_such_file_or_directory)
 {
 /// If the file is already deleted, do nothing.
 }
+else
+throw;
+}
 }
 }
 }
@@ -1448,11 +1454,16 @@ void MergeTreeData::dropAllData()
 {
 disk->removeRecursive(path);
 }
-catch (const Poco::FileNotFoundException &)
+catch (const fs::filesystem_error & e)
+{
+if (e.code() == std::errc::no_such_file_or_directory)
 {
 /// If the file is already deleted, log the error message and do nothing.
 tryLogCurrentException(__PRETTY_FUNCTION__);
 }
+else
+throw;
+}
 }

 setDataVolume(0, 0, 0);
@@ -1474,8 +1485,8 @@ void MergeTreeData::dropIfEmpty()
 for (const auto & [path, disk] : getRelativeDataPathsWithDisks())
 {
 /// Non recursive, exception is thrown if there are more files.
-disk->removeFileIfExists(path + MergeTreeData::FORMAT_VERSION_FILE_NAME);
-disk->removeDirectory(path + MergeTreeData::DETACHED_DIR_NAME);
+disk->removeFileIfExists(fs::path(path) / MergeTreeData::FORMAT_VERSION_FILE_NAME);
+disk->removeDirectory(fs::path(path) / MergeTreeData::DETACHED_DIR_NAME);
 disk->removeDirectory(path);
 }
 }
@@ -1925,7 +1936,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::createPart(
 const VolumePtr & volume, const String & relative_path, const IMergeTreeDataPart * parent_part) const
 {
 MergeTreeDataPartType type;
-auto full_path = relative_data_path + (parent_part ? parent_part->relative_path + "/" : "") + relative_path + "/";
+auto full_path = fs::path(relative_data_path) / (parent_part ? parent_part->relative_path : "") / relative_path / "";
 auto mrk_ext = MergeTreeIndexGranularityInfo::getMarksExtensionFromFilesystem(volume->getDisk(), full_path);

 if (mrk_ext)
@@ -1978,7 +1989,7 @@ void MergeTreeData::changeSettings(
 {
 auto disk = new_storage_policy->getDiskByName(disk_name);
 disk->createDirectories(relative_data_path);
-disk->createDirectories(relative_data_path + MergeTreeData::DETACHED_DIR_NAME);
+disk->createDirectories(fs::path(relative_data_path) / MergeTreeData::DETACHED_DIR_NAME);
 }
 /// FIXME how would that be done while reloading configuration???

@@ -2007,7 +2018,7 @@ void MergeTreeData::PartsTemporaryRename::addPart(const String & old_name, const
 old_and_new_names.push_back({old_name, new_name});
 for (const auto & [path, disk] : storage.getRelativeDataPathsWithDisks())
 {
-for (auto it = disk->iterateDirectory(path + source_dir); it->isValid(); it->next())
+for (auto it = disk->iterateDirectory(fs::path(path) / source_dir); it->isValid(); it->next())
 {
 if (it->name() == old_name)
 {
@@ -2029,8 +2040,8 @@ void MergeTreeData::PartsTemporaryRename::tryRenameAll()
 if (old_name.empty() || new_name.empty())
 throw DB::Exception("Empty part name. Most likely it's a bug.", ErrorCodes::INCORRECT_FILE_NAME);
 const auto & [path, disk] = old_part_name_to_path_and_disk[old_name];
-const auto full_path = path + source_dir; /// for old_name
-disk->moveFile(full_path + old_name, full_path + new_name);
+const auto full_path = fs::path(path) / source_dir; /// for old_name
+disk->moveFile(fs::path(full_path) / old_name, fs::path(full_path) / new_name);
 }
 catch (...)
 {
@@ -2054,8 +2065,8 @@ MergeTreeData::PartsTemporaryRename::~PartsTemporaryRename()
 try
 {
 const auto & [path, disk] = old_part_name_to_path_and_disk[old_name];
-const auto full_path = path + source_dir; /// for old_name
-disk->moveFile(full_path + new_name, full_path + old_name);
+const String full_path = fs::path(path) / source_dir; /// for old_name
+disk->moveFile(fs::path(full_path) / new_name, fs::path(full_path) / old_name);
 }
 catch (...)
 {
@@ -2752,7 +2763,7 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy)
 addPartContributionToDataVolume(part_copy);

 auto disk = original_active_part->volume->getDisk();
-String marker_path = original_active_part->getFullRelativePath() + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
+String marker_path = fs::path(original_active_part->getFullRelativePath()) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
 try
 {
 disk->createFile(marker_path);
@@ -3315,7 +3326,7 @@ MergeTreeData::getDetachedParts() const

 for (const auto & [path, disk] : getRelativeDataPathsWithDisks())
 {
-for (auto it = disk->iterateDirectory(path + MergeTreeData::DETACHED_DIR_NAME); it->isValid(); it->next())
+for (auto it = disk->iterateDirectory(fs::path(path) / MergeTreeData::DETACHED_DIR_NAME); it->isValid(); it->next())
 {
 res.emplace_back();
 auto & part = res.back();
@@ -3370,7 +3381,7 @@ void MergeTreeData::dropDetached(const ASTPtr & partition, bool part, ContextPtr
 for (auto & [old_name, new_name] : renamed_parts.old_and_new_names)
 {
 const auto & [path, disk] = renamed_parts.old_part_name_to_path_and_disk[old_name];
-disk->removeRecursive(path + "detached/" + new_name + "/");
+disk->removeRecursive(fs::path(path) / "detached" / new_name / "");
 LOG_DEBUG(log, "Dropped detached part {}", old_name);
 old_name.clear();
 }
@@ -3431,8 +3442,8 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const

 if (!containing_part.empty() && containing_part != name)
 // TODO maybe use PartsTemporaryRename here?
-disk->moveDirectory(relative_data_path + source_dir + name,
-relative_data_path + source_dir + "inactive_" + name);
+disk->moveDirectory(fs::path(relative_data_path) / source_dir / name,
+fs::path(relative_data_path) / source_dir / ("inactive_" + name));
 else
 renamed_parts.addPart(name, "attaching_" + name);
 }
@@ -4294,12 +4305,12 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPartOnSameDisk(
 const auto & src_relative_data_path = src_part_in_memory->storage.relative_data_path;
 auto flushed_part_path = src_part_in_memory->getRelativePathForPrefix(tmp_part_prefix);
 src_part_in_memory->flushToDisk(src_relative_data_path, flushed_part_path, metadata_snapshot);
-src_part_path = src_relative_data_path + flushed_part_path + "/";
+src_part_path = fs::path(src_relative_data_path) / flushed_part_path / "";
 }

 LOG_DEBUG(log, "Cloning part {} to {}", fullPath(disk, src_part_path), fullPath(disk, dst_part_path));
 localBackup(disk, src_part_path, dst_part_path);
-disk->removeFileIfExists(dst_part_path + "/" + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
+disk->removeFileIfExists(fs::path(dst_part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);

 auto single_disk_volume = std::make_shared<SingleDiskVolume>(disk->getName(), disk, 0);
 auto dst_data_part = createPart(dst_part_name, dst_part_info, single_disk_volume, tmp_dst_part_name);
@@ -4411,10 +4422,10 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
 const String & with_name,
 ContextPtr local_context)
 {
-String clickhouse_path = Poco::Path(local_context->getPath()).makeAbsolute().toString();
-String default_shadow_path = clickhouse_path + "shadow/";
-Poco::File(default_shadow_path).createDirectories();
-auto increment = Increment(default_shadow_path + "increment.txt").get(true);
+String clickhouse_path = fs::canonical(local_context->getPath());
+String default_shadow_path = fs::path(clickhouse_path) / "shadow/";
+fs::create_directories(default_shadow_path);
+auto increment = Increment(fs::path(default_shadow_path) / "increment.txt").get(true);

 const String shadow_path = "shadow/";

@@ -4422,7 +4433,7 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
 const auto data_parts = getDataParts();

 String backup_name = (!with_name.empty() ? escapeForFileName(with_name) : toString(increment));
-String backup_path = shadow_path + backup_name + "/";
+String backup_path = fs::path(shadow_path) / backup_name / "";

 for (const auto & disk : getStoragePolicy()->getDisks())
 disk->onFreeze(backup_path);
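
In freezePartitionsByMatcher the Poco calls are mapped onto std::filesystem equivalents: Poco::Path::makeAbsolute becomes fs::canonical and Poco::File::createDirectories becomes fs::create_directories. A sketch of what those two calls do, not part of the commit and with a hypothetical working directory; one behavioural difference worth noting is that fs::canonical also resolves symlinks and requires the path to exist:

    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    int main()
    {
        // Absolute, symlink-resolved form of the current working directory.
        fs::path clickhouse_path = fs::canonical(".");

        // Creates every missing component; a no-op if the directory already exists.
        fs::path default_shadow_path = clickhouse_path / "shadow";
        fs::create_directories(default_shadow_path);

        std::cout << default_shadow_path << '\n';
        return 0;
    }
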
@@ -4439,20 +4450,20 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(

 part->volume->getDisk()->createDirectories(backup_path);

-String backup_part_path = backup_path + relative_data_path + part->relative_path;
+String backup_part_path = fs::path(backup_path) / relative_data_path / part->relative_path;
 if (auto part_in_memory = asInMemoryPart(part))
-part_in_memory->flushToDisk(backup_path + relative_data_path, part->relative_path, metadata_snapshot);
+part_in_memory->flushToDisk(fs::path(backup_path) / relative_data_path, part->relative_path, metadata_snapshot);
 else
 localBackup(part->volume->getDisk(), part->getFullRelativePath(), backup_part_path);

-part->volume->getDisk()->removeFileIfExists(backup_part_path + "/" + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
+part->volume->getDisk()->removeFileIfExists(fs::path(backup_part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);

 part->is_frozen.store(true, std::memory_order_relaxed);
 result.push_back(PartitionCommandResultInfo{
 .partition_id = part->info.partition_id,
 .part_name = part->name,
-.backup_path = part->volume->getDisk()->getPath() + backup_path,
-.part_backup_path = part->volume->getDisk()->getPath() + backup_part_path,
+.backup_path = fs::path(part->volume->getDisk()->getPath()) / backup_path,
+.part_backup_path = fs::path(part->volume->getDisk()->getPath()) / backup_part_path,
 .backup_name = backup_name,
 });
 ++parts_processed;
@@ -4481,7 +4492,7 @@ PartitionCommandsResultInfo MergeTreeData::unfreezeAll(

 PartitionCommandsResultInfo MergeTreeData::unfreezePartitionsByMatcher(MatcherFn matcher, const String & backup_name, ContextPtr)
 {
-auto backup_path = std::filesystem::path("shadow") / escapeForFileName(backup_name) / relative_data_path;
+auto backup_path = fs::path("shadow") / escapeForFileName(backup_name) / relative_data_path;

 LOG_DEBUG(log, "Unfreezing parts by path {}", backup_path.generic_string());

@@ -2268,7 +2268,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart(
 if (need_remove_expired_values)
 {
 /// Write a file with ttl infos in json format.
-auto out_ttl = disk->writeFile(new_data_part->getFullRelativePath() + "ttl.txt", 4096);
+auto out_ttl = disk->writeFile(fs::path(new_data_part->getFullRelativePath()) / "ttl.txt", 4096);
 HashingWriteBuffer out_hashing(*out_ttl);
 new_data_part->ttl_infos.write(out_hashing);
 new_data_part->checksums.files["ttl.txt"].file_size = out_hashing.count();
@@ -2277,7 +2277,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart(

 {
 /// Write file with checksums.
-auto out_checksums = disk->writeFile(new_data_part->getFullRelativePath() + "checksums.txt", 4096);
+auto out_checksums = disk->writeFile(fs::path(new_data_part->getFullRelativePath()) / "checksums.txt", 4096);
 new_data_part->checksums.write(*out_checksums);
 } /// close fd

@@ -2288,7 +2288,7 @@ void MergeTreeDataMergerMutator::finalizeMutatedPart(

 {
 /// Write a file with a description of columns.
-auto out_columns = disk->writeFile(new_data_part->getFullRelativePath() + "columns.txt", 4096);
+auto out_columns = disk->writeFile(fs::path(new_data_part->getFullRelativePath()) / "columns.txt", 4096);
 new_data_part->getColumns().writeText(*out_columns);
 } /// close fd

@@ -7,7 +7,6 @@
 #include <IO/WriteBufferFromString.h>
 #include <Compression/CompressedReadBuffer.h>
 #include <Compression/CompressedWriteBuffer.h>
-#include <Poco/File.h>


 namespace DB
@@ -2,7 +2,6 @@
 #include <DataTypes/NestedUtils.h>
 #include <Storages/MergeTree/MergeTreeReaderCompact.h>
 #include <Storages/MergeTree/MergeTreeDataPartWriterCompact.h>
-#include <Poco/File.h>


 namespace DB
@@ -5,7 +5,6 @@
 #include <Storages/MergeTree/IMergeTreeReader.h>
 #include <DataTypes/NestedUtils.h>
 #include <Interpreters/Context.h>
-#include <Poco/File.h>
 #include <Poco/Logger.h>
 #include <common/logger_useful.h>

@@ -1,5 +1,4 @@
 #include "MergeTreeDataPartWide.h"
-#include <Poco/File.h>
 #include <Storages/MergeTree/MergeTreeReaderWide.h>
 #include <Storages/MergeTree/MergeTreeDataPartWriterWide.h>
 #include <Storages/MergeTree/IMergeTreeDataPartWriter.h>
@@ -3,8 +3,6 @@
 #include <optional>
 #include <unordered_set>

-#include <Poco/File.h>
-
 #include <Storages/MergeTree/MergeTreeDataSelectExecutor.h>
 #include <Storages/MergeTree/MergeTreeSelectProcessor.h>
 #include <Storages/MergeTree/MergeTreeReverseSelectProcessor.h>
@@ -11,7 +11,6 @@
 #include <DataTypes/DataTypeDateTime.h>
 #include <DataTypes/DataTypeDate.h>
 #include <IO/WriteHelpers.h>
-#include <Poco/File.h>
 #include <Common/typeid_cast.h>
 #include <DataStreams/ITTLAlgorithm.h>
 #include <DataStreams/OneBlockInputStream.h>
@@ -1,6 +1,8 @@
 #include <Storages/MergeTree/MergeTreeIndexGranularityInfo.h>
 #include <Storages/MergeTree/MergeTreeData.h>
-#include <Poco/Path.h>

+namespace fs = std::filesystem;

 namespace DB
 {
@@ -17,8 +19,7 @@ std::optional<std::string> MergeTreeIndexGranularityInfo::getMarksExtensionFromF
 {
 for (DiskDirectoryIteratorPtr it = disk->iterateDirectory(path_to_part); it->isValid(); it->next())
 {
-Poco::Path path(it->path());
-const auto & ext = "." + path.getExtension();
+const auto & ext = fs::path(it->path()).extension();
 if (ext == getNonAdaptiveMrkExtension()
 || ext == getAdaptiveMrkExtension(MergeTreeDataPartType::WIDE)
 || ext == getAdaptiveMrkExtension(MergeTreeDataPartType::COMPACT))
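
The two Poco lines above collapse into one because fs::path::extension() already returns the extension with its leading dot, whereas Poco::Path::getExtension() returned it without, hence the old manual `"." +` prefix. A small sketch, not from the commit, with a hypothetical mark file name:

    #include <cassert>
    #include <filesystem>

    namespace fs = std::filesystem;

    int main()
    {
        fs::path mark_file = "all_1_1_0/data.mrk2";   // hypothetical mark file inside a part
        assert(mark_file.extension() == ".mrk2");      // dot included, so no manual "." + ... is needed
        assert(mark_file.stem() == "data");
        return 0;
    }
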
@@ -1,7 +1,6 @@
 #include <Storages/MergeTree/MergeTreeMarksLoader.h>
 #include <Storages/MergeTree/MergeTreeData.h>
 #include <IO/ReadBufferFromFile.h>
-#include <Poco/File.h>

 #include <utility>

@@ -4,9 +4,6 @@
 #include <IO/ReadBufferFromFile.h>
 #include <IO/ReadBufferFromString.h>

-#include <Poco/File.h>
-#include <Poco/Path.h>
-
 #include <utility>


@@ -206,18 +206,18 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt
 /// Try to fetch part from S3 without copy and fallback to default copy
 /// if it's not possible
 moving_part.part->assertOnDisk();
-String path_to_clone = data->getRelativeDataPath() + directory_to_move + "/";
+String path_to_clone = fs::path(data->getRelativeDataPath()) / directory_to_move / "";
 String relative_path = part->relative_path;
 if (disk->exists(path_to_clone + relative_path))
 {
 LOG_WARNING(log, "Path " + fullPath(disk, path_to_clone + relative_path) + " already exists. Will remove it and clone again.");
-disk->removeRecursive(path_to_clone + relative_path + "/");
+disk->removeRecursive(fs::path(path_to_clone) / relative_path / "");
 }
 disk->createDirectories(path_to_clone);
-bool is_fetched = data->tryToFetchIfShared(*part, disk, path_to_clone + "/" + part->name);
+bool is_fetched = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name);
 if (!is_fetched)
-part->volume->getDisk()->copy(data->getRelativeDataPath() + relative_path + "/", disk, path_to_clone);
-part->volume->getDisk()->removeFileIfExists(path_to_clone + "/" + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
+part->volume->getDisk()->copy(fs::path(data->getRelativeDataPath()) / relative_path / "", disk, path_to_clone);
+part->volume->getDisk()->removeFileIfExists(fs::path(path_to_clone) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
 }
 else
 {
@@ -226,7 +226,7 @@ MergeTreeData::DataPartPtr MergeTreePartsMover::clonePart(const MergeTreeMoveEnt

 auto single_disk_volume = std::make_shared<SingleDiskVolume>("volume_" + part->name, moving_part.reserved_space->getDisk(), 0);
 MergeTreeData::MutableDataPartPtr cloned_part =
-data->createPart(part->name, single_disk_volume, directory_to_move + '/' + part->name);
+data->createPart(part->name, single_disk_volume, fs::path(directory_to_move) / part->name);
 LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part->getFullPath());

 cloned_part->loadColumnsChecksumsIndexes(true, true);
@@ -2,7 +2,6 @@
 #include <Storages/MergeTree/MergeTreeDataPartCompact.h>
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/NestedUtils.h>
-#include <Poco/File.h>

 namespace DB
 {
@@ -3,7 +3,6 @@
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/NestedUtils.h>
 #include <Columns/ColumnArray.h>
-#include <Poco/File.h>

 namespace DB
 {
@@ -1,6 +1,5 @@
 #include <Storages/MergeTree/MergedBlockOutputStream.h>
 #include <Interpreters/Context.h>
-#include <Poco/File.h>
 #include <Parsers/queryToString.h>


@@ -123,7 +122,7 @@ void MergedBlockOutputStream::finalizePartOnDisk(
 {
 if (new_part->uuid != UUIDHelpers::Nil)
 {
-auto out = volume->getDisk()->writeFile(part_path + IMergeTreeDataPart::UUID_FILE_NAME, 4096);
+auto out = volume->getDisk()->writeFile(fs::path(part_path) / IMergeTreeDataPart::UUID_FILE_NAME, 4096);
 HashingWriteBuffer out_hashing(*out);
 writeUUIDText(new_part->uuid, out_hashing);
 checksums.files[IMergeTreeDataPart::UUID_FILE_NAME].file_size = out_hashing.count();
@@ -142,7 +141,7 @@ void MergedBlockOutputStream::finalizePartOnDisk(
 throw Exception("MinMax index was not initialized for new non-empty part " + new_part->name
 + ". It is a bug.", ErrorCodes::LOGICAL_ERROR);

-auto count_out = volume->getDisk()->writeFile(part_path + "count.txt", 4096);
+auto count_out = volume->getDisk()->writeFile(fs::path(part_path) / "count.txt", 4096);
 HashingWriteBuffer count_out_hashing(*count_out);
 writeIntText(rows_count, count_out_hashing);
 count_out_hashing.next();
@@ -157,7 +156,7 @@ void MergedBlockOutputStream::finalizePartOnDisk(
 if (!new_part->ttl_infos.empty())
 {
 /// Write a file with ttl infos in json format.
-auto out = volume->getDisk()->writeFile(part_path + "ttl.txt", 4096);
+auto out = volume->getDisk()->writeFile(fs::path(part_path) / "ttl.txt", 4096);
 HashingWriteBuffer out_hashing(*out);
 new_part->ttl_infos.write(out_hashing);
 checksums.files["ttl.txt"].file_size = out_hashing.count();
@@ -171,7 +170,7 @@ void MergedBlockOutputStream::finalizePartOnDisk(

 {
 /// Write a file with a description of columns.
-auto out = volume->getDisk()->writeFile(part_path + "columns.txt", 4096);
+auto out = volume->getDisk()->writeFile(fs::path(part_path) / "columns.txt", 4096);
 part_columns.writeText(*out);
 out->finalize();
 if (sync)
@@ -192,7 +191,7 @@ void MergedBlockOutputStream::finalizePartOnDisk(

 {
 /// Write file with checksums.
-auto out = volume->getDisk()->writeFile(part_path + "checksums.txt", 4096);
+auto out = volume->getDisk()->writeFile(fs::path(part_path) / "checksums.txt", 4096);
 checksums.write(*out);
 out->finalize();
 if (sync)
@@ -62,7 +62,7 @@ bool ReplicatedMergeTreeQueue::isVirtualPart(const MergeTreeData::DataPartPtr &

 bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 {
-auto queue_path = replica_path + "/queue";
+String queue_path = fs::path(replica_path) / "queue";
 LOG_DEBUG(log, "Loading queue from {}", queue_path);

 bool updated = false;
@@ -74,7 +74,7 @@ bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 /// Reset batch size on initialization to recover from possible errors of too large batch size.
 current_multi_batch_size = 1;

-String log_pointer_str = zookeeper->get(replica_path + "/log_pointer");
+String log_pointer_str = zookeeper->get(fs::path(replica_path) / "log_pointer");
 log_pointer = log_pointer_str.empty() ? 0 : parse<UInt64>(log_pointer_str);

 std::unordered_set<String> already_loaded_paths;
@@ -101,7 +101,7 @@ bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 futures.reserve(children.size());

 for (const String & child : children)
-futures.emplace_back(child, zookeeper->asyncGet(queue_path + "/" + child));
+futures.emplace_back(child, zookeeper->asyncGet(fs::path(queue_path) / child));

 for (auto & future : futures)
 {
@@ -116,7 +116,7 @@ bool ReplicatedMergeTreeQueue::load(zkutil::ZooKeeperPtr zookeeper)
 updated = true;
 }

-zookeeper->tryGet(replica_path + "/mutation_pointer", mutation_pointer);
+zookeeper->tryGet(fs::path(replica_path) / "mutation_pointer", mutation_pointer);
 }

 updateTimesInZooKeeper(zookeeper, min_unprocessed_insert_time_changed, {});
@@ -399,7 +399,7 @@ void ReplicatedMergeTreeQueue::removeProcessedEntry(zkutil::ZooKeeperPtr zookeep
 if (!need_remove_from_zk)
 return;

-auto code = zookeeper->tryRemove(replica_path + "/queue/" + entry->znode_name);
+auto code = zookeeper->tryRemove(fs::path(replica_path) / "queue" / entry->znode_name);
 if (code != Coordination::Error::ZOK)
 LOG_ERROR(log, "Couldn't remove {}/queue/{}: {}. This shouldn't happen often.", replica_path, entry->znode_name, Coordination::errorMessage(code));

@@ -455,7 +455,7 @@ bool ReplicatedMergeTreeQueue::remove(zkutil::ZooKeeperPtr zookeeper, const Stri

 notifySubscribers(queue_size);

-zookeeper->tryRemove(replica_path + "/queue/" + found->znode_name);
+zookeeper->tryRemove(fs::path(replica_path) / "queue" / found->znode_name);
 updateTimesInZooKeeper(zookeeper, min_unprocessed_insert_time_changed, max_processed_insert_time_changed);

 return true;
@@ -474,14 +474,14 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 if (pull_log_blocker.isCancelled())
 throw Exception("Log pulling is cancelled", ErrorCodes::ABORTED);

-String index_str = zookeeper->get(replica_path + "/log_pointer");
+String index_str = zookeeper->get(fs::path(replica_path) / "log_pointer");
 UInt64 index;

 /// The version of "/log" is modified when new entries to merge/mutate/drop appear.
 Coordination::Stat stat;
-zookeeper->get(zookeeper_path + "/log", &stat);
+zookeeper->get(fs::path(zookeeper_path) / "log", &stat);

-Strings log_entries = zookeeper->getChildrenWatch(zookeeper_path + "/log", nullptr, watch_callback);
+Strings log_entries = zookeeper->getChildrenWatch(fs::path(zookeeper_path) / "log", nullptr, watch_callback);

 /// We update mutations after we have loaded the list of log entries, but before we insert them
 /// in the queue.
@@ -494,7 +494,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 /// If we do not already have a pointer to the log, put a pointer to the first entry in it.
 index = log_entries.empty() ? 0 : parse<UInt64>(std::min_element(log_entries.begin(), log_entries.end())->substr(strlen("log-")));

-zookeeper->set(replica_path + "/log_pointer", toString(index));
+zookeeper->set(fs::path(replica_path) / "log_pointer", toString(index));
 }
 else
 {
@@ -541,7 +541,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 futures.reserve(end - begin);

 for (auto it = begin; it != end; ++it)
-futures.emplace_back(*it, zookeeper->asyncGet(zookeeper_path + "/log/" + *it));
+futures.emplace_back(*it, zookeeper->asyncGet(fs::path(zookeeper_path) / "log" / *it));

 /// Simultaneously add all new entries to the queue and move the pointer to the log.

@@ -558,7 +558,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 copied_entries.emplace_back(LogEntry::parse(res.data, res.stat));

 ops.emplace_back(zkutil::makeCreateRequest(
-replica_path + "/queue/queue-", res.data, zkutil::CreateMode::PersistentSequential));
+fs::path(replica_path) / "queue/queue-", res.data, zkutil::CreateMode::PersistentSequential));

 const auto & entry = *copied_entries.back();
 if (entry.type == LogEntry::GET_PART || entry.type == LogEntry::ATTACH_PART)
@@ -573,11 +573,11 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
 }

 ops.emplace_back(zkutil::makeSetRequest(
-replica_path + "/log_pointer", toString(last_entry_index + 1), -1));
+fs::path(replica_path) / "log_pointer", toString(last_entry_index + 1), -1));

 if (min_unprocessed_insert_time_changed)
 ops.emplace_back(zkutil::makeSetRequest(
-replica_path + "/min_unprocessed_insert_time", toString(*min_unprocessed_insert_time_changed), -1));
+fs::path(replica_path) / "min_unprocessed_insert_time", toString(*min_unprocessed_insert_time_changed), -1));

 auto responses = zookeeper->multi(ops);

@@ -655,7 +655,7 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C
 {
 std::lock_guard lock(update_mutations_mutex);

-Strings entries_in_zk = zookeeper->getChildrenWatch(zookeeper_path + "/mutations", nullptr, watch_callback);
+Strings entries_in_zk = zookeeper->getChildrenWatch(fs::path(zookeeper_path) / "mutations", nullptr, watch_callback);
 StringSet entries_in_zk_set(entries_in_zk.begin(), entries_in_zk.end());

 /// Compare with the local state, delete obsolete entries and determine which new entries to load.
@@ -712,7 +712,7 @@ void ReplicatedMergeTreeQueue::updateMutations(zkutil::ZooKeeperPtr zookeeper, C

 std::vector<std::future<Coordination::GetResponse>> futures;
 for (const String & entry : entries_to_load)
-futures.emplace_back(zookeeper->asyncGet(zookeeper_path + "/mutations/" + entry));
+futures.emplace_back(zookeeper->asyncGet(fs::path(zookeeper_path) / "mutations" / entry));

 std::vector<ReplicatedMergeTreeMutationEntryPtr> new_mutations;
 for (size_t i = 0; i < entries_to_load.size(); ++i)
|
for (size_t i = 0; i < entries_to_load.size(); ++i)
|
||||||
@ -796,7 +796,7 @@ ReplicatedMergeTreeMutationEntryPtr ReplicatedMergeTreeQueue::removeMutation(
|
|||||||
{
|
{
|
||||||
std::lock_guard lock(update_mutations_mutex);
|
std::lock_guard lock(update_mutations_mutex);
|
||||||
|
|
||||||
auto rc = zookeeper->tryRemove(zookeeper_path + "/mutations/" + mutation_id);
|
auto rc = zookeeper->tryRemove(fs::path(zookeeper_path) / "mutations" / mutation_id);
|
||||||
if (rc == Coordination::Error::ZOK)
|
if (rc == Coordination::Error::ZOK)
|
||||||
LOG_DEBUG(log, "Removed mutation {} from ZooKeeper.", mutation_id);
|
LOG_DEBUG(log, "Removed mutation {} from ZooKeeper.", mutation_id);
|
||||||
|
|
||||||
@ -940,12 +940,12 @@ void ReplicatedMergeTreeQueue::removePartProducingOpsInRange(
|
|||||||
{
|
{
|
||||||
if ((*it)->currently_executing)
|
if ((*it)->currently_executing)
|
||||||
to_wait.push_back(*it);
|
to_wait.push_back(*it);
|
||||||
auto code = zookeeper->tryRemove(replica_path + "/queue/" + (*it)->znode_name);
|
auto code = zookeeper->tryRemove(fs::path(replica_path) / "queue" / (*it)->znode_name);
|
||||||
/// FIXME it's probably unsafe to remove entries non-atomically
|
/// FIXME it's probably unsafe to remove entries non-atomically
|
||||||
/// when this method called directly from alter query (not from replication queue task),
|
/// when this method called directly from alter query (not from replication queue task),
|
||||||
/// because entries will be lost if ALTER fails.
|
/// because entries will be lost if ALTER fails.
|
||||||
if (code != Coordination::Error::ZOK)
|
if (code != Coordination::Error::ZOK)
|
||||||
LOG_INFO(log, "Couldn't remove {}: {}", replica_path + "/queue/" + (*it)->znode_name, Coordination::errorMessage(code));
|
LOG_INFO(log, "Couldn't remove {}: {}", (fs::path(replica_path) / "queue" / (*it)->znode_name).string(), Coordination::errorMessage(code));
|
||||||
|
|
||||||
updateStateOnQueueEntryRemoval(
|
updateStateOnQueueEntryRemoval(
|
||||||
*it, /* is_successful = */ false,
|
*it, /* is_successful = */ false,
|
||||||
@ -1554,7 +1554,7 @@ bool ReplicatedMergeTreeQueue::tryFinalizeMutations(zkutil::ZooKeeperPtr zookeep
|
|||||||
|
|
||||||
if (!finished.empty())
|
if (!finished.empty())
|
||||||
{
|
{
|
||||||
zookeeper->set(replica_path + "/mutation_pointer", finished.back()->znode_name);
|
zookeeper->set(fs::path(replica_path) / "mutation_pointer", finished.back()->znode_name);
|
||||||
|
|
||||||
std::lock_guard lock(state_mutex);
|
std::lock_guard lock(state_mutex);
|
||||||
|
|
||||||
@ -1750,22 +1750,22 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Load current quorum status.
|
/// Load current quorum status.
|
||||||
auto quorum_status_future = zookeeper->asyncTryGet(queue.zookeeper_path + "/quorum/status");
|
auto quorum_status_future = zookeeper->asyncTryGet(fs::path(queue.zookeeper_path) / "quorum" / "status");
|
||||||
|
|
||||||
/// Load current inserts
|
/// Load current inserts
|
||||||
std::unordered_set<String> lock_holder_paths;
|
std::unordered_set<String> lock_holder_paths;
|
||||||
for (const String & entry : zookeeper->getChildren(queue.zookeeper_path + "/temp"))
|
for (const String & entry : zookeeper->getChildren(fs::path(queue.zookeeper_path) / "temp"))
|
||||||
{
|
{
|
||||||
if (startsWith(entry, "abandonable_lock-"))
|
if (startsWith(entry, "abandonable_lock-"))
|
||||||
lock_holder_paths.insert(queue.zookeeper_path + "/temp/" + entry);
|
lock_holder_paths.insert(fs::path(queue.zookeeper_path) / "temp" / entry);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!lock_holder_paths.empty())
|
if (!lock_holder_paths.empty())
|
||||||
{
|
{
|
||||||
Strings partitions = zookeeper->getChildren(queue.zookeeper_path + "/block_numbers");
|
Strings partitions = zookeeper->getChildren(fs::path(queue.zookeeper_path) / "block_numbers");
|
||||||
std::vector<std::future<Coordination::ListResponse>> lock_futures;
|
std::vector<std::future<Coordination::ListResponse>> lock_futures;
|
||||||
for (const String & partition : partitions)
|
for (const String & partition : partitions)
|
||||||
lock_futures.push_back(zookeeper->asyncGetChildren(queue.zookeeper_path + "/block_numbers/" + partition));
|
lock_futures.push_back(zookeeper->asyncGetChildren(fs::path(queue.zookeeper_path) / "block_numbers" / partition));
|
||||||
|
|
||||||
struct BlockInfoInZooKeeper
|
struct BlockInfoInZooKeeper
|
||||||
{
|
{
|
||||||
@ -1786,7 +1786,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate(
|
|||||||
if (startsWith(entry, "block-"))
|
if (startsWith(entry, "block-"))
|
||||||
{
|
{
|
||||||
Int64 block_number = parse<Int64>(entry.substr(strlen("block-")));
|
Int64 block_number = parse<Int64>(entry.substr(strlen("block-")));
|
||||||
String zk_path = queue.zookeeper_path + "/block_numbers/" + partitions[i] + "/" + entry;
|
String zk_path = fs::path(queue.zookeeper_path) / "block_numbers" / partitions[i] / entry;
|
||||||
block_infos.emplace_back(
|
block_infos.emplace_back(
|
||||||
BlockInfoInZooKeeper{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)});
|
BlockInfoInZooKeeper{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)});
|
||||||
}
|
}
|
||||||
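Every hunk above follows the same pattern: ZooKeeper keys that were previously built by string concatenation are now joined with std::filesystem::path::operator/. The one call site that converts back explicitly is the LOG_INFO argument, which appends .string(), presumably because the formatting machinery behind the macro does not accept an fs::path directly. Below is a minimal standalone sketch of the joining behaviour these hunks rely on, using sample path values rather than ClickHouse code:

    #include <cassert>
    #include <filesystem>
    #include <iostream>
    #include <string>

    namespace fs = std::filesystem;

    int main()
    {
        // Sample ZooKeeper-style key; the real code uses the table's replica_path.
        const std::string replica_path = "/clickhouse/tables/01/hits/replicas/r1";

        // operator/ inserts exactly one separator between components, so the result
        // matches the old replica_path + "/queue/" + name concatenation (on POSIX).
        fs::path znode = fs::path(replica_path) / "queue" / "queue-0000000042";
        assert(znode.string() == replica_path + "/queue/queue-0000000042");

        // Formatting calls such as LOG_INFO need the explicit .string() conversion.
        std::cout << znode.string() << '\n';
        return 0;
    }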
@@ -238,7 +238,7 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart()
 auto zookeeper = storage.getZooKeeper();
 
 String quorum_str;
-if (zookeeper->tryGet(storage.zookeeper_path + "/quorum/status", quorum_str))
+if (zookeeper->tryGet(fs::path(storage.zookeeper_path) / "quorum" / "status", quorum_str))
 {
 ReplicatedMergeTreeQuorumEntry quorum_entry(quorum_str);
 
@@ -251,12 +251,12 @@ void ReplicatedMergeTreeRestartingThread::updateQuorumIfWeHavePart()
 }
 
 Strings part_names;
-String parallel_quorum_parts_path = storage.zookeeper_path + "/quorum/parallel";
+String parallel_quorum_parts_path = fs::path(storage.zookeeper_path) / "quorum" / "parallel";
 if (zookeeper->tryGetChildren(parallel_quorum_parts_path, part_names) == Coordination::Error::ZOK)
 {
 for (auto & part_name : part_names)
 {
-if (zookeeper->tryGet(parallel_quorum_parts_path + "/" + part_name, quorum_str))
+if (zookeeper->tryGet(fs::path(parallel_quorum_parts_path) / part_name, quorum_str))
 {
 ReplicatedMergeTreeQuorumEntry quorum_entry(quorum_str);
 if (!quorum_entry.replicas.count(storage.replica_name)
@@ -278,7 +278,7 @@ void ReplicatedMergeTreeRestartingThread::activateReplica()
 /// How other replicas can access this one.
 ReplicatedMergeTreeAddress address = storage.getReplicatedMergeTreeAddress();
 
-String is_active_path = storage.replica_path + "/is_active";
+String is_active_path = fs::path(storage.replica_path) / "is_active";
 
 /** If the node is marked as active, but the mark is made in the same instance, delete it.
 * This is possible only when session in ZooKeeper expires.
@@ -302,7 +302,7 @@ void ReplicatedMergeTreeRestartingThread::activateReplica()
 /// Simultaneously declare that this replica is active, and update the host.
 Coordination::Requests ops;
 ops.emplace_back(zkutil::makeCreateRequest(is_active_path, active_node_identifier, zkutil::CreateMode::Ephemeral));
-ops.emplace_back(zkutil::makeSetRequest(storage.replica_path + "/host", address.toString(), -1));
+ops.emplace_back(zkutil::makeSetRequest(fs::path(storage.replica_path) / "host", address.toString(), -1));
 
 try
 {
@@ -311,7 +311,7 @@ void ReplicatedMergeTreeRestartingThread::activateReplica()
 catch (const Coordination::Exception & e)
 {
 String existing_replica_host;
-zookeeper->tryGet(storage.replica_path + "/host", existing_replica_host);
+zookeeper->tryGet(fs::path(storage.replica_path) / "host", existing_replica_host);
 
 if (existing_replica_host.empty())
 existing_replica_host = "without host node";
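In the hunks above, the joined path is in several places assigned straight to a String variable (is_active_path, parallel_quorum_parts_path) or passed to functions that take a String. This compiles because fs::path converts implicitly to its native string type, which is std::string on POSIX. A minimal sketch of that conversion, again with a sample path value:

    #include <cassert>
    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    int main()
    {
        // Sample value; the real code joins members such as storage.replica_path.
        const std::string replica_path = "/clickhouse/tables/01/hits/replicas/r1";

        // fs::path converts implicitly to its native string type (std::string on POSIX),
        // so the joined path can initialise a plain string variable directly.
        std::string is_active_path = fs::path(replica_path) / "is_active";
        assert(is_active_path == replica_path + "/is_active");
        return 0;
    }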
@@ -1,7 +1,6 @@
 #include <algorithm>
 #include <optional>
 
-#include <Poco/File.h>
 #include <Poco/DirectoryIterator.h>
 
 #include <Storages/MergeTree/MergeTreeIndexGranularity.h>
@@ -70,7 +69,7 @@ IMergeTreeDataPart::Checksums checkDataPart(
 NamesAndTypesList columns_txt;
 
 {
-auto buf = disk->readFile(path + "columns.txt");
+auto buf = disk->readFile(fs::path(path) / "columns.txt");
 columns_txt.readText(*buf);
 assertEOF(*buf);
 }
@@ -231,9 +230,9 @@ IMergeTreeDataPart::Checksums checkDataPart(
 /// Checksums from the rest files listed in checksums.txt. May be absent. If present, they are subsequently compared with the actual data checksums.
 IMergeTreeDataPart::Checksums checksums_txt;
 
-if (require_checksums || disk->exists(path + "checksums.txt"))
+if (require_checksums || disk->exists(fs::path(path) / "checksums.txt"))
 {
-auto buf = disk->readFile(path + "checksums.txt");
+auto buf = disk->readFile(fs::path(path) / "checksums.txt");
 checksums_txt.read(*buf);
 assertEOF(*buf);
 }