Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 07:31:57 +00:00

Commit 51b2329295: Merge branch 'master' into aku/window-prototype
.github/workflows/anchore-analysis.yml (vendored, 18 changes)
@@ -1,8 +1,8 @@
# This workflow checks out code, performs an Anchore container image
# vulnerability and compliance scan, and integrates the results with
# GitHub Advanced Security code scanning feature. For more information on
# GitHub Advanced Security code scanning feature. For more information on
# the Anchore scan action usage and parameters, see
# https://github.com/anchore/scan-action. For more information on
# https://github.com/anchore/scan-action. For more information on
# Anchore container image scanning in general, see
# https://docs.anchore.com.

@@ -28,18 +28,12 @@ jobs:
perl -pi -e 's|=\$version||g' Dockerfile
docker build . --file Dockerfile --tag localbuild/testimage:latest
- name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
uses: anchore/scan-action@master
uses: anchore/scan-action@v2
id: scan
with:
image-reference: "localbuild/testimage:latest"
dockerfile-path: "docker/server/Dockerfile"
image: "localbuild/testimage:latest"
acs-report-enable: true
fail-build: true
- name: Upload artifact
uses: actions/upload-artifact@v1.0.0
with:
name: AnchoreReports
path: ./anchore-reports/
- name: Upload Anchore Scan Report
uses: github/codeql-action/upload-sarif@v1
with:
sarif_file: results.sarif
sarif_file: ${{ steps.scan.outputs.sarif }}
.gitmodules (vendored, 3 changes)

@@ -53,7 +53,8 @@
url = https://github.com/ClickHouse-Extras/Turbo-Base64.git
[submodule "contrib/arrow"]
path = contrib/arrow
url = https://github.com/apache/arrow
url = https://github.com/ClickHouse-Extras/arrow
branch = clickhouse-arrow-2.0.0
[submodule "contrib/thrift"]
path = contrib/thrift
url = https://github.com/apache/thrift.git
@@ -6,6 +6,12 @@
#include <unistd.h>
#include <functional>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <csignal>
#include <dlfcn.h>
#include <fcntl.h>
#include <fstream>

namespace
{
@@ -83,6 +89,8 @@ ReplxxLineReader::ReplxxLineReader(
/// it also binded to M-p/M-n).
rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); });
rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); });

rx.bind_key(Replxx::KEY::meta('E'), [this](char32_t) { openEditor(); return Replxx::ACTION_RESULT::CONTINUE; });
}

ReplxxLineReader::~ReplxxLineReader()
@@ -127,7 +135,114 @@ void ReplxxLineReader::addToHistory(const String & line)
rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
}

int ReplxxLineReader::execute(const std::string & command)
{
std::vector<char> argv0("sh", &("sh"[3]));
std::vector<char> argv1("-c", &("-c"[3]));
std::vector<char> argv2(command.data(), command.data() + command.size() + 1);

const char * filename = "/bin/sh";
char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};

static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
if (!real_vfork)
{
rx.print("Cannot find symbol vfork in myself: %s\n", errnoToString(errno).c_str());
return -1;
}

pid_t pid = reinterpret_cast<pid_t (*)()>(real_vfork)();

if (-1 == pid)
{
rx.print("Cannot vfork: %s\n", errnoToString(errno).c_str());
return -1;
}

if (0 == pid)
{
sigset_t mask;
sigemptyset(&mask);
sigprocmask(0, nullptr, &mask);
sigprocmask(SIG_UNBLOCK, &mask, nullptr);

execv(filename, argv);
_exit(-1);
}

int status = 0;
if (-1 == waitpid(pid, &status, 0))
{
rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
return -1;
}
return status;
}

void ReplxxLineReader::openEditor()
{
char filename[] = "clickhouse_replxx_XXXXXX.sql";
int fd = ::mkstemps(filename, 4);
if (-1 == fd)
{
rx.print("Cannot create temporary file to edit query: %s\n", errnoToString(errno).c_str());
return;
}

String editor = std::getenv("EDITOR");
if (editor.empty())
editor = "vim";

replxx::Replxx::State state(rx.get_state());

size_t bytes_written = 0;
const char * begin = state.text();
size_t offset = strlen(state.text());
while (bytes_written != offset)
{
ssize_t res = ::write(fd, begin + bytes_written, offset - bytes_written);
if ((-1 == res || 0 == res) && errno != EINTR)
{
rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
return;
}
bytes_written += res;
}

if (0 != ::close(fd))
{
rx.print("Cannot close temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
return;
}

if (0 == execute(editor + " " + filename))
{
try
{
std::ifstream t(filename);
std::string str;
t.seekg(0, std::ios::end);
str.reserve(t.tellg());
t.seekg(0, std::ios::beg);
str.assign((std::istreambuf_iterator<char>(t)), std::istreambuf_iterator<char>());
rx.set_state(replxx::Replxx::State(str.c_str(), str.size()));
}
catch (...)
{
rx.print("Cannot read from temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
return;
}
}

if (bracketed_paste_enabled)
enableBracketedPaste();

if (0 != ::unlink(filename))
rx.print("Cannot remove temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
}

void ReplxxLineReader::enableBracketedPaste()
{
bracketed_paste_enabled = true;
rx.enable_bracketed_paste();
};
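For context on the `execute()` helper above: it hands back the raw status word from `waitpid()`, which is why `openEditor()` treats `0 == execute(...)` as success (only a clean, zero-code exit yields an all-zero status). A minimal, self-contained sketch of how such a status word is usually decoded, assuming only POSIX `<sys/wait.h>`:

```cpp
#include <sys/wait.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    /// std::system(), like the execute() helper, reports the child's raw wait status.
    int status = std::system("exit 3");

    if (status == -1)
        std::perror("system");
    else if (WIFEXITED(status))
        std::printf("exited with code %d\n", WEXITSTATUS(status)); /// 0 only for a successful exit
    else if (WIFSIGNALED(status))
        std::printf("terminated by signal %d\n", WTERMSIG(status));

    return 0;
}
```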
@@ -22,10 +22,13 @@ public:
private:
InputStatus readOneLine(const String & prompt) override;
void addToHistory(const String & line) override;
int execute(const std::string & command);
void openEditor();

replxx::Replxx rx;
replxx::Replxx::highlighter_callback_t highlighter;

// used to call flock() to synchronize multiple clients using same history file
int history_file_fd = -1;
bool bracketed_paste_enabled = false;
};
@@ -4,6 +4,11 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/resource.h>
#if defined(__linux__)
#include <sys/prctl.h>
#endif
#include <fcntl.h>
#include <errno.h>
#include <string.h>
@@ -12,7 +17,6 @@
#include <unistd.h>

#include <typeinfo>
#include <sys/resource.h>
#include <iostream>
#include <fstream>
#include <sstream>
@@ -22,7 +26,6 @@
#include <Poco/Observer.h>
#include <Poco/AutoPtr.h>
#include <Poco/PatternFormatter.h>
#include <Poco/TaskManager.h>
#include <Poco/File.h>
#include <Poco/Path.h>
#include <Poco/Message.h>
@@ -470,7 +473,6 @@ BaseDaemon::~BaseDaemon()

void BaseDaemon::terminate()
{
getTaskManager().cancelAll();
if (::raise(SIGTERM) != 0)
throw Poco::SystemException("cannot terminate process");
}
@@ -478,22 +480,11 @@ void BaseDaemon::terminate()
void BaseDaemon::kill()
{
dumpCoverageReportIfPossible();
pid.reset();
pid_file.reset();
if (::raise(SIGKILL) != 0)
throw Poco::SystemException("cannot kill process");
}

void BaseDaemon::sleep(double seconds)
{
wakeup_event.reset();
wakeup_event.tryWait(seconds * 1000);
}

void BaseDaemon::wakeup()
{
wakeup_event.set();
}

std::string BaseDaemon::getDefaultCorePath() const
{
return "/opt/cores/";
@@ -564,7 +555,6 @@ void BaseDaemon::initialize(Application & self)
{
closeFDs();

task_manager = std::make_unique<Poco::TaskManager>();
ServerApplication::initialize(self);

/// now highest priority (lowest value) is PRIO_APPLICATION = -100, we want higher!
@@ -648,10 +638,6 @@ void BaseDaemon::initialize(Application & self)
throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path);
}

/// Create pid file.
if (config().has("pid"))
pid.emplace(config().getString("pid"), DB::StatusFile::write_pid);

/// Change path for logging.
if (!log_path.empty())
{
@@ -667,9 +653,17 @@ void BaseDaemon::initialize(Application & self)
throw Poco::Exception("Cannot change directory to /tmp");
}

// sensitive data masking rules are not used here
/// sensitive data masking rules are not used here
buildLoggers(config(), logger(), self.commandName());

/// After initialized loggers but before initialized signal handling.
if (should_setup_watchdog)
setupWatchdog();

/// Create pid file.
if (config().has("pid"))
pid_file.emplace(config().getString("pid"), DB::StatusFile::write_pid);

if (is_daemon)
{
/** Change working directory to the directory to write core dumps.
@@ -704,54 +698,71 @@ void BaseDaemon::initialize(Application & self)
}

static void addSignalHandler(const std::vector<int> & signals, signal_function handler, std::vector<int> * out_handled_signals)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO;

#if defined(OS_DARWIN)
sigemptyset(&sa.sa_mask);
for (auto signal : signals)
sigaddset(&sa.sa_mask, signal);
#else
if (sigemptyset(&sa.sa_mask))
throw Poco::Exception("Cannot set signal handler.");

for (auto signal : signals)
if (sigaddset(&sa.sa_mask, signal))
throw Poco::Exception("Cannot set signal handler.");
#endif

for (auto signal : signals)
if (sigaction(signal, &sa, nullptr))
throw Poco::Exception("Cannot set signal handler.");

if (out_handled_signals)
std::copy(signals.begin(), signals.end(), std::back_inserter(*out_handled_signals));
};

static void blockSignals(const std::vector<int> & signals)
{
sigset_t sig_set;

#if defined(OS_DARWIN)
sigemptyset(&sig_set);
for (auto signal : signals)
sigaddset(&sig_set, signal);
#else
if (sigemptyset(&sig_set))
throw Poco::Exception("Cannot block signal.");

for (auto signal : signals)
if (sigaddset(&sig_set, signal))
throw Poco::Exception("Cannot block signal.");
#endif

if (pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
throw Poco::Exception("Cannot block signal.");
};

void BaseDaemon::initializeTerminationAndSignalProcessing()
{
SentryWriter::initialize(config());
std::set_terminate(terminate_handler);

/// We want to avoid SIGPIPE when working with sockets and pipes, and just handle return value/errno instead.
{
sigset_t sig_set;
if (sigemptyset(&sig_set) || sigaddset(&sig_set, SIGPIPE) || pthread_sigmask(SIG_BLOCK, &sig_set, nullptr))
throw Poco::Exception("Cannot block signal.");
}
blockSignals({SIGPIPE});

/// Setup signal handlers.
auto add_signal_handler =
[this](const std::vector<int> & signals, signal_function handler)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = handler;
sa.sa_flags = SA_SIGINFO;

{
#if defined(OS_DARWIN)
sigemptyset(&sa.sa_mask);
for (auto signal : signals)
sigaddset(&sa.sa_mask, signal);
#else
if (sigemptyset(&sa.sa_mask))
throw Poco::Exception("Cannot set signal handler.");

for (auto signal : signals)
if (sigaddset(&sa.sa_mask, signal))
throw Poco::Exception("Cannot set signal handler.");
#endif

for (auto signal : signals)
if (sigaction(signal, &sa, nullptr))
throw Poco::Exception("Cannot set signal handler.");

std::copy(signals.begin(), signals.end(), std::back_inserter(handled_signals));
}
};

/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.

add_signal_handler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP}, signalHandler);
add_signal_handler({SIGHUP, SIGUSR1}, closeLogsSignalHandler);
add_signal_handler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler);
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP}, signalHandler, &handled_signals);
addSignalHandler({SIGHUP, SIGUSR1}, closeLogsSignalHandler, &handled_signals);
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);

#if defined(SANITIZER)
__sanitizer_set_death_callback(sanitizerDeathCallback);
@@ -786,23 +797,6 @@ void BaseDaemon::logRevision() const
+ ", PID " + std::to_string(getpid()));
}

/// Makes server shutdown if at least one Poco::Task have failed.
void BaseDaemon::exitOnTaskError()
{
Poco::Observer<BaseDaemon, Poco::TaskFailedNotification> obs(*this, &BaseDaemon::handleNotification);
getTaskManager().addObserver(obs);
}

/// Used for exitOnTaskError()
void BaseDaemon::handleNotification(Poco::TaskFailedNotification *_tfn)
{
task_failed = true;
Poco::AutoPtr<Poco::TaskFailedNotification> fn(_tfn);
Poco::Logger * lg = &(logger());
LOG_ERROR(lg, "Task '{}' failed. Daemon is shutting down. Reason - {}", fn->task()->name(), fn->reason().displayText());
ServerApplication::terminate();
}

void BaseDaemon::defineOptions(Poco::Util::OptionSet & new_options)
{
new_options.addOption(
@@ -863,13 +857,144 @@ void BaseDaemon::onInterruptSignals(int signal_id)
if (sigint_signals_counter >= 2)
{
LOG_INFO(&logger(), "Received second signal Interrupt. Immediately terminate.");
kill();
call_default_signal_handler(signal_id);
/// If the above did not help.
_exit(128 + signal_id);
}
}

void BaseDaemon::waitForTerminationRequest()
{
/// NOTE: as we already process signals via pipe, we don't have to block them with sigprocmask in threads
std::unique_lock<std::mutex> lock(signal_handler_mutex);
signal_event.wait(lock, [this](){ return terminate_signals_counter > 0; });
}

void BaseDaemon::shouldSetupWatchdog(char * argv0_)
{
should_setup_watchdog = true;
argv0 = argv0_;
}

void BaseDaemon::setupWatchdog()
{
/// Initialize in advance to avoid double initialization in forked processes.
DateLUT::instance();

std::string original_process_name;
if (argv0)
original_process_name = argv0;

while (true)
{
static pid_t pid = -1;
pid = fork();

if (-1 == pid)
throw Poco::Exception("Cannot fork");

if (0 == pid)
{
logger().information("Forked a child process to watch");
#if defined(__linux__)
if (0 != prctl(PR_SET_PDEATHSIG, SIGKILL))
logger().warning("Cannot do prctl to ask termination with parent.");
#endif
return;
}

/// Change short thread name and process name.
setThreadName("clckhouse-watch"); /// 15 characters

if (argv0)
{
const char * new_process_name = "clickhouse-watchdog";
memset(argv0, 0, original_process_name.size());
memcpy(argv0, new_process_name, std::min(strlen(new_process_name), original_process_name.size()));
}

logger().information(fmt::format("Will watch for the process with pid {}", pid));

/// Forward signals to the child process.
addSignalHandler(
{SIGHUP, SIGUSR1, SIGINT, SIGQUIT, SIGTERM},
[](int sig, siginfo_t *, void *)
{
/// Forward all signals except INT as it can be send by terminal to the process group when user press Ctrl+C,
/// and we process double delivery of this signal as immediate termination.
if (sig == SIGINT)
return;

const char * error_message = "Cannot forward signal to the child process.\n";
if (0 != ::kill(pid, sig))
{
auto res = write(STDERR_FILENO, error_message, strlen(error_message));
(void)res;
}
},
nullptr);

int status = 0;
do
{
if (-1 != waitpid(pid, &status, WUNTRACED | WCONTINUED) || errno == ECHILD)
{
if (WIFSTOPPED(status))
logger().warning(fmt::format("Child process was stopped by signal {}.", WSTOPSIG(status)));
else if (WIFCONTINUED(status))
logger().warning(fmt::format("Child process was continued."));
else
break;
}
else if (errno != EINTR)
throw Poco::Exception("Cannot waitpid, errno: " + std::string(strerror(errno)));
} while (true);

if (errno == ECHILD)
{
logger().information("Child process no longer exists.");
_exit(status);
}

if (WIFEXITED(status))
{
logger().information(fmt::format("Child process exited normally with code {}.", WEXITSTATUS(status)));
_exit(status);
}

if (WIFSIGNALED(status))
{
int sig = WTERMSIG(status);

if (sig == SIGKILL)
{
logger().fatal(fmt::format("Child process was terminated by signal {} (KILL)."
" If it is not done by 'forcestop' command or manually,"
" the possible cause is OOM Killer (see 'dmesg' and look at the '/var/log/kern.log' for the details).", sig));
}
else
{
logger().fatal(fmt::format("Child process was terminated by signal {}.", sig));

if (sig == SIGINT || sig == SIGTERM || sig == SIGQUIT)
_exit(status);
}
}
else
{
logger().fatal("Child process was not exited normally by unknown reason.");
}

/// Automatic restart is not enabled but you can play with it.
#if 1
_exit(status);
#else
logger().information("Will restart.");
if (argv0)
memcpy(argv0, original_process_name.c_str(), original_process_name.size());
#endif
}
}
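As background for the `prctl(PR_SET_PDEATHSIG, SIGKILL)` call in the watchdog above: on Linux a forked child can ask the kernel to signal it when its parent dies, so the watched server process does not outlive a crashed watchdog. A rough, Linux-only sketch of that pattern (SIGTERM is chosen here purely for illustration):

```cpp
#include <sys/prctl.h>
#include <unistd.h>
#include <csignal>
#include <cstdio>

int main()
{
    pid_t pid = fork();
    if (pid == -1)
        return 1;

    if (pid == 0)
    {
        /// Child: ask to receive SIGTERM as soon as the parent process exits.
        if (prctl(PR_SET_PDEATHSIG, SIGTERM) != 0)
            std::perror("prctl");
        pause(); /// Sleep until a signal arrives; the kernel delivers SIGTERM once the parent is gone.
        return 0;
    }

    /// Parent: a real watchdog would loop on waitpid() here.
    std::printf("watching child %d\n", static_cast<int>(pid));
    return 0;
}
```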
@@ -12,7 +12,6 @@
#include <chrono>
#include <Poco/Process.h>
#include <Poco/ThreadPool.h>
#include <Poco/TaskNotification.h>
#include <Poco/Util/Application.h>
#include <Poco/Util/ServerApplication.h>
#include <Poco/Net/SocketAddress.h>
@@ -26,9 +25,6 @@
#include <loggers/Loggers.h>

namespace Poco { class TaskManager; }

/// \brief Base class for applications that can run as daemons.
///
/// \code
@@ -52,31 +48,26 @@ public:
BaseDaemon();
~BaseDaemon() override;

/// Загружает конфигурацию и "строит" логгеры на запись в файлы
/// Load configuration, prepare loggers, etc.
void initialize(Poco::Util::Application &) override;

/// Читает конфигурацию
void reloadConfiguration();

/// Определяет параметр командной строки
/// Process command line parameters
void defineOptions(Poco::Util::OptionSet & new_options) override;

/// Заставляет демон завершаться, если хотя бы одна задача завершилась неудачно
void exitOnTaskError();
/// Graceful shutdown
static void terminate();

/// Завершение демона ("мягкое")
void terminate();

/// Завершение демона ("жёсткое")
/// Forceful shutdown
void kill();

/// Получен ли сигнал на завершение?
/// Cancellation request has been received.
bool isCancelled() const
{
return is_cancelled;
}

/// Получение ссылки на экземпляр демона
static BaseDaemon & instance()
{
return dynamic_cast<BaseDaemon &>(Poco::Util::Application::instance());
@@ -85,12 +76,6 @@ public:
/// return none if daemon doesn't exist, reference to the daemon otherwise
static std::optional<std::reference_wrapper<BaseDaemon>> tryGetInstance() { return tryGetInstance<BaseDaemon>(); }

/// Спит заданное количество секунд или до события wakeup
void sleep(double seconds);

/// Разбудить
void wakeup();

/// В Graphite компоненты пути(папки) разделяются точкой.
/// У нас принят путь формата root_path.hostname_yandex_ru.key
/// root_path по умолчанию one_min
@@ -131,24 +116,23 @@ public:
/// also doesn't close global internal pipes for signal handling
static void closeFDs();

/// If this method is called after initialization and before run,
/// will fork child process and setup watchdog that will print diagnostic info, if the child terminates.
/// argv0 is needed to change process name (consequently, it is needed for scripts involving "pgrep", "pidof" to work correctly).
void shouldSetupWatchdog(char * argv0_);

protected:
/// Возвращает TaskManager приложения
/// все методы task_manager следует вызывать из одного потока
/// иначе возможен deadlock, т.к. joinAll выполняется под локом, а любой метод тоже берет лок
Poco::TaskManager & getTaskManager() { return *task_manager; }

virtual void logRevision() const;

/// Используется при exitOnTaskError()
void handleNotification(Poco::TaskFailedNotification *);

/// thread safe
virtual void handleSignal(int signal_id);

/// initialize termination process and signal handlers
virtual void initializeTerminationAndSignalProcessing();

/// реализация обработки сигналов завершения через pipe не требует блокировки сигнала с помощью sigprocmask во всех потоках
/// fork the main process and watch if it was killed
void setupWatchdog();

void waitForTerminationRequest()
#if defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION >= 0x02000000 // in old upstream poco not vitrual
override
@@ -162,21 +146,13 @@ protected:

virtual std::string getDefaultCorePath() const;

std::unique_ptr<Poco::TaskManager> task_manager;

std::optional<DB::StatusFile> pid;
std::optional<DB::StatusFile> pid_file;

std::atomic_bool is_cancelled{false};

/// Флаг устанавливается по сообщению из Task (при аварийном завершении).
bool task_failed = false;

bool log_to_console = false;

/// Событие, чтобы проснуться во время ожидания
Poco::Event wakeup_event;

/// Поток, в котором принимается сигнал HUP/USR1 для закрытия логов.
/// A thread that acts on HUP and USR1 signal (close logs).
Poco::Thread signal_listener_thread;
std::unique_ptr<Poco::Runnable> signal_listener;

@@ -194,6 +170,9 @@ protected:
String build_id_info;

std::vector<int> handled_signals;

bool should_setup_watchdog = false;
char * argv0 = nullptr;
};
base/glibc-compatibility/musl/mkstemps.c (new file, 44 additions)
@@ -0,0 +1,44 @@
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* This assumes that a check for the
template size has already been made */
static char * __randname(char * template)
{
int i;
struct timespec ts;
unsigned long r;

clock_gettime(CLOCK_REALTIME, &ts);
r = (ts.tv_nsec * 65537) ^ ((((intptr_t)(&ts)) / 16) + ((intptr_t)template));
for (i = 0; i < 6; i++, r >>= 5)
template[i] = 'A' + (r & 15) + (r & 16) * 2;

return template;
}

int mkstemps(char * template, int len)
{
size_t l = strlen(template);
if (l < 6 || len > l - 6 || memcmp(template + l - len - 6, "XXXXXX", 6))
{
errno = EINVAL;
return -1;
}

int fd, retries = 100;
do
{
__randname(template + l - len - 6);
if ((fd = open(template, O_RDWR | O_CREAT | O_EXCL, 0600)) >= 0)
return fd;
} while (--retries && errno == EEXIST);

memcpy(template + l - len - 6, "XXXXXX", 6);
return -1;
}
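The second argument is what distinguishes `mkstemps()` from plain `mkstemp()`: the `XXXXXX` run must sit immediately before a fixed suffix, and the caller passes the suffix length (4 for `.sql` in `ReplxxLineReader::openEditor()` above). A small usage sketch; the `query_XXXXXX.sql` template is just an illustrative name:

```cpp
#include <cstdio>
#include <cstdlib>   /// declares mkstemps() on most libcs; it is a BSD/GNU extension
#include <unistd.h>

int main()
{
    char path[] = "query_XXXXXX.sql"; /// "XXXXXX" directly before the 4-character suffix ".sql"
    int fd = mkstemps(path, 4);
    if (fd == -1)
    {
        std::perror("mkstemps");
        return 1;
    }
    std::printf("created %s\n", path); /// the XXXXXX part has been replaced with a unique token
    close(fd);
    unlink(path);
    return 0;
}
```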
@@ -141,11 +141,6 @@ if(NOT EXTERNAL_PARQUET_FOUND AND NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT O
else()
set(USE_INTERNAL_PARQUET_LIBRARY 1)

if(USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
set(ARROW_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src")
set(PARQUET_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src" ${ClickHouse_BINARY_DIR}/contrib/arrow/cpp/src)
endif()

if(MAKE_STATIC_LIBRARIES)
set(FLATBUFFERS_LIBRARY flatbuffers)
set(ARROW_LIBRARY arrow_static)
@@ -155,9 +150,6 @@ if(NOT EXTERNAL_PARQUET_FOUND AND NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT O
set(FLATBUFFERS_LIBRARY flatbuffers_shared)
set(ARROW_LIBRARY arrow_shared)
set(PARQUET_LIBRARY parquet_shared)
if(USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
list(APPEND PARQUET_LIBRARY boost::regex)
endif()
set(THRIFT_LIBRARY thrift)
endif()
contrib/CMakeLists.txt (vendored, 53 changes)
@@ -163,51 +163,21 @@ if(USE_INTERNAL_SNAPPY_LIBRARY)
endif()

if (USE_INTERNAL_PARQUET_LIBRARY)
if (USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
# We dont use arrow's cmakefiles because they uses too many depends and download some libs in compile time
# But this mode can be used for updating auto-generated parquet files:
# cmake -DUSE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE=1 -DUSE_STATIC_LIBRARIES=0
# copy {BUILD_DIR}/contrib/arrow/cpp/src/parquet/*.cpp,*.h -> /contrib/arrow-cmake/cpp/src/parquet/
# But you can update auto-generated parquet files manually:
# cd {BUILD_DIR}/contrib/arrow/cpp/src/parquet && mkdir -p build && cd build
# cmake .. -DARROW_COMPUTE=ON -DARROW_PARQUET=ON -DARROW_SIMD_LEVEL=NONE -DARROW_VERBOSE_THIRDPARTY_BUILD=ON
# -DARROW_BUILD_SHARED=1 -DARROW_BUILD_UTILITIES=OFF -DARROW_BUILD_INTEGRATION=OFF
# -DBoost_FOUND=1 -DARROW_TEST_LINKAGE="shared"
# make -j8
# copy {BUILD_DIR}/contrib/arrow/cpp/src/parquet/*.cpp,*.h -> {BUILD_DIR}/contrib/arrow-cmake/cpp/src/parquet/

# Also useful parquet reader:
# cd contrib/arrow/cpp/build && mkdir -p build && cmake .. -DPARQUET_BUILD_EXECUTABLES=1 && make -j8
# contrib/arrow/cpp/build/debug/parquet-reader some_file.parquet
# cd {BUILD_DIR}/contrib/arrow/cpp && mkdir -p build && cd build
# cmake .. -DARROW_PARQUET=1 -DARROW_WITH_SNAPPY=1 -DPARQUET_BUILD_EXECUTABLES=1
# make -j8
# {BUILD_DIR}/contrib/arrow/cpp/build/release/parquet-reader some_file.parquet

set (ARROW_COMPUTE ON CACHE INTERNAL "")
set (ARROW_PARQUET ON CACHE INTERNAL "")
set (ARROW_VERBOSE_THIRDPARTY_BUILD ON CACHE INTERNAL "")
set (ARROW_BUILD_SHARED 1 CACHE INTERNAL "")
set (ARROW_BUILD_UTILITIES OFF CACHE INTERNAL "")
set (ARROW_BUILD_INTEGRATION OFF CACHE INTERNAL "")
set (ARROW_BOOST_HEADER_ONLY ON CACHE INTERNAL "")
set (Boost_FOUND 1 CACHE INTERNAL "")
if (MAKE_STATIC_LIBRARIES)
set (PARQUET_ARROW_LINKAGE "static" CACHE INTERNAL "")
set (ARROW_TEST_LINKAGE "static" CACHE INTERNAL "")
set (ARROW_BUILD_STATIC ${MAKE_STATIC_LIBRARIES} CACHE INTERNAL "")
else ()
set (PARQUET_ARROW_LINKAGE "shared" CACHE INTERNAL "")
set (ARROW_TEST_LINKAGE "shared" CACHE INTERNAL "")
endif ()

if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (_save_build_type ${CMAKE_BUILD_TYPE})
set (CMAKE_BUILD_TYPE Release)
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
endif ()

# Because Arrow uses CMAKE_SOURCE_DIR as a project path
# Hopefully will be fixed in https://github.com/apache/arrow/pull/2676
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/cmake_modules")
add_subdirectory (arrow/cpp)

if (_save_build_type)
set (CMAKE_BUILD_TYPE ${_save_build_type})
unset (_save_build_type)
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
endif ()

else()
add_subdirectory(arrow-cmake)

# The library is large - avoid bloat.
@@ -215,7 +185,6 @@ else()
target_compile_options (${THRIFT_LIBRARY} PRIVATE -g0)
target_compile_options (${PARQUET_LIBRARY} PRIVATE -g0)
endif()
endif()

if (USE_INTERNAL_AVRO_LIBRARY)
add_subdirectory(avro-cmake)
contrib/arrow (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 3cbcb7b62c2f2d02851bff837758637eb592a64b
Subproject commit 744bdfe188f018e5e05f5deebd4e9ee0a7706cf4
@@ -144,15 +144,16 @@ set(ORC_SRCS

set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/arrow)

configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cpp/src/arrow/util/config.h")
configure_file("${LIBRARY_DIR}/util/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/cpp/src/arrow/util/config.h")

# arrow/cpp/src/arrow/CMakeLists.txt
set(ARROW_SRCS
${LIBRARY_DIR}/array.cc
${LIBRARY_DIR}/buffer.cc
${LIBRARY_DIR}/device.cc
${LIBRARY_DIR}/builder.cc
${LIBRARY_DIR}/chunked_array.cc
${LIBRARY_DIR}/compare.cc
${LIBRARY_DIR}/datum.cc
${LIBRARY_DIR}/device.cc
${LIBRARY_DIR}/extension_type.cc
${LIBRARY_DIR}/memory_pool.cc
${LIBRARY_DIR}/pretty_print.cc
@@ -167,11 +168,12 @@ set(ARROW_SRCS
${LIBRARY_DIR}/type.cc
${LIBRARY_DIR}/visitor.cc

${LIBRARY_DIR}/tensor/coo_converter.cc
${LIBRARY_DIR}/tensor/csc_converter.cc
${LIBRARY_DIR}/tensor/csf_converter.cc
${LIBRARY_DIR}/tensor/csr_converter.cc

${LIBRARY_DIR}/array/array_base.cc
${LIBRARY_DIR}/array/array_binary.cc
${LIBRARY_DIR}/array/array_decimal.cc
${LIBRARY_DIR}/array/array_dict.cc
${LIBRARY_DIR}/array/array_nested.cc
${LIBRARY_DIR}/array/array_primitive.cc
${LIBRARY_DIR}/array/builder_adaptive.cc
${LIBRARY_DIR}/array/builder_base.cc
${LIBRARY_DIR}/array/builder_binary.cc
@@ -181,17 +183,50 @@ set(ARROW_SRCS
${LIBRARY_DIR}/array/builder_primitive.cc
${LIBRARY_DIR}/array/builder_union.cc
${LIBRARY_DIR}/array/concatenate.cc
${LIBRARY_DIR}/array/dict_internal.cc
${LIBRARY_DIR}/array/data.cc
${LIBRARY_DIR}/array/diff.cc
${LIBRARY_DIR}/array/util.cc
${LIBRARY_DIR}/array/validate.cc

${LIBRARY_DIR}/csv/converter.cc
${LIBRARY_DIR}/compute/api_scalar.cc
${LIBRARY_DIR}/compute/api_vector.cc
${LIBRARY_DIR}/compute/cast.cc
${LIBRARY_DIR}/compute/exec.cc
${LIBRARY_DIR}/compute/function.cc
${LIBRARY_DIR}/compute/kernel.cc
${LIBRARY_DIR}/compute/registry.cc

${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc
${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc
${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc
${LIBRARY_DIR}/compute/kernels/codegen_internal.cc
${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc
${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc
${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc
${LIBRARY_DIR}/compute/kernels/scalar_compare.cc
${LIBRARY_DIR}/compute/kernels/scalar_fill_null.cc
${LIBRARY_DIR}/compute/kernels/scalar_nested.cc
${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc
${LIBRARY_DIR}/compute/kernels/scalar_string.cc
${LIBRARY_DIR}/compute/kernels/scalar_validity.cc
${LIBRARY_DIR}/compute/kernels/vector_hash.cc
${LIBRARY_DIR}/compute/kernels/vector_nested.cc
${LIBRARY_DIR}/compute/kernels/vector_selection.cc
${LIBRARY_DIR}/compute/kernels/vector_sort.cc
${LIBRARY_DIR}/compute/kernels/util_internal.cc

${LIBRARY_DIR}/csv/chunker.cc
${LIBRARY_DIR}/csv/column_builder.cc
${LIBRARY_DIR}/csv/column_decoder.cc
${LIBRARY_DIR}/csv/converter.cc
${LIBRARY_DIR}/csv/options.cc
${LIBRARY_DIR}/csv/parser.cc
${LIBRARY_DIR}/csv/reader.cc
${LIBRARY_DIR}/csv/column_decoder.cc

${LIBRARY_DIR}/ipc/dictionary.cc
${LIBRARY_DIR}/ipc/feather.cc
@@ -202,14 +237,25 @@ set(ARROW_SRCS
${LIBRARY_DIR}/ipc/writer.cc

${LIBRARY_DIR}/io/buffered.cc
${LIBRARY_DIR}/io/caching.cc
${LIBRARY_DIR}/io/compressed.cc
${LIBRARY_DIR}/io/file.cc
${LIBRARY_DIR}/io/interfaces.cc
${LIBRARY_DIR}/io/memory.cc
${LIBRARY_DIR}/io/slow.cc

${LIBRARY_DIR}/tensor/coo_converter.cc
${LIBRARY_DIR}/tensor/csf_converter.cc
${LIBRARY_DIR}/tensor/csx_converter.cc

${LIBRARY_DIR}/util/basic_decimal.cc
${LIBRARY_DIR}/util/bit_block_counter.cc
${LIBRARY_DIR}/util/bit_run_reader.cc
${LIBRARY_DIR}/util/bit_util.cc
${LIBRARY_DIR}/util/bitmap.cc
${LIBRARY_DIR}/util/bitmap_builders.cc
${LIBRARY_DIR}/util/bitmap_ops.cc
${LIBRARY_DIR}/util/bpacking.cc
${LIBRARY_DIR}/util/compression.cc
${LIBRARY_DIR}/util/compression_lz4.cc
${LIBRARY_DIR}/util/compression_snappy.cc
@@ -217,8 +263,12 @@ set(ARROW_SRCS
${LIBRARY_DIR}/util/compression_zstd.cc
${LIBRARY_DIR}/util/cpu_info.cc
${LIBRARY_DIR}/util/decimal.cc
${LIBRARY_DIR}/util/delimiting.cc
${LIBRARY_DIR}/util/formatting.cc
${LIBRARY_DIR}/util/future.cc
${LIBRARY_DIR}/util/int_util.cc
${LIBRARY_DIR}/util/io_util.cc
${LIBRARY_DIR}/util/iterator.cc
${LIBRARY_DIR}/util/key_value_metadata.cc
${LIBRARY_DIR}/util/logging.cc
${LIBRARY_DIR}/util/memory.cc
@@ -226,27 +276,15 @@ set(ARROW_SRCS
${LIBRARY_DIR}/util/string.cc
${LIBRARY_DIR}/util/task_group.cc
${LIBRARY_DIR}/util/thread_pool.cc
${LIBRARY_DIR}/util/time.cc
${LIBRARY_DIR}/util/trie.cc
${LIBRARY_DIR}/util/utf8.cc
${LIBRARY_DIR}/util/future.cc
${LIBRARY_DIR}/util/formatting.cc
${LIBRARY_DIR}/util/parsing.cc
${LIBRARY_DIR}/util/time.cc
${LIBRARY_DIR}/util/delimiting.cc
${LIBRARY_DIR}/util/iterator.cc
${LIBRARY_DIR}/util/value_parsing.cc

${LIBRARY_DIR}/vendored/base64.cpp
${ORC_SRCS}
)

set(ARROW_SRCS ${ARROW_SRCS}
${LIBRARY_DIR}/compute/context.cc
${LIBRARY_DIR}/compute/kernels/boolean.cc
${LIBRARY_DIR}/compute/kernels/cast.cc
${LIBRARY_DIR}/compute/kernels/hash.cc
${LIBRARY_DIR}/compute/kernels/util_internal.cc
)

if (SNAPPY_INCLUDE_DIR AND SNAPPY_LIBRARY)
set(ARROW_WITH_SNAPPY 1)
endif ()
@@ -289,7 +327,8 @@ if (USE_INTERNAL_PROTOBUF_LIBRARY)
add_dependencies(${ARROW_LIBRARY} protoc)
endif ()

target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cpp/src)
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src)
target_include_directories(${ARROW_LIBRARY} SYSTEM PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/cpp/src)
target_link_libraries(${ARROW_LIBRARY} PRIVATE ${DOUBLE_CONVERSION_LIBRARIES} ${Protobuf_LIBRARY})
target_link_libraries(${ARROW_LIBRARY} PRIVATE lz4)
if (ARROW_WITH_SNAPPY)
@@ -319,19 +358,26 @@ set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/parquet)
set(GEN_LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src/generated)
# arrow/cpp/src/parquet/CMakeLists.txt
set(PARQUET_SRCS
${LIBRARY_DIR}/arrow/path_internal.cc
${LIBRARY_DIR}/arrow/reader.cc
${LIBRARY_DIR}/arrow/reader_internal.cc
${LIBRARY_DIR}/arrow/schema.cc
${LIBRARY_DIR}/arrow/schema_internal.cc
${LIBRARY_DIR}/arrow/writer.cc
${LIBRARY_DIR}/arrow/path_internal.cc
${LIBRARY_DIR}/bloom_filter.cc
${LIBRARY_DIR}/column_reader.cc
${LIBRARY_DIR}/column_scanner.cc
${LIBRARY_DIR}/column_writer.cc
${LIBRARY_DIR}/deprecated_io.cc
${LIBRARY_DIR}/encoding.cc
${LIBRARY_DIR}/encryption.cc
${LIBRARY_DIR}/encryption_internal.cc
${LIBRARY_DIR}/file_reader.cc
${LIBRARY_DIR}/file_writer.cc
${LIBRARY_DIR}/internal_file_decryptor.cc
${LIBRARY_DIR}/internal_file_encryptor.cc
${LIBRARY_DIR}/level_conversion.cc
${LIBRARY_DIR}/level_comparison.cc
${LIBRARY_DIR}/metadata.cc
${LIBRARY_DIR}/murmur3.cc
${LIBRARY_DIR}/platform.cc
@@ -340,10 +386,6 @@ set(PARQUET_SRCS
${LIBRARY_DIR}/schema.cc
${LIBRARY_DIR}/statistics.cc
${LIBRARY_DIR}/types.cc
${LIBRARY_DIR}/encryption.cc
${LIBRARY_DIR}/encryption_internal.cc
${LIBRARY_DIR}/internal_file_decryptor.cc
${LIBRARY_DIR}/internal_file_encryptor.cc

${GEN_LIBRARY_DIR}/parquet_constants.cpp
${GEN_LIBRARY_DIR}/parquet_types.cpp
@@ -1,26 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#define ARROW_VERSION_MAJOR
#define ARROW_VERSION_MINOR
#define ARROW_VERSION_PATCH
#define ARROW_VERSION ((ARROW_VERSION_MAJOR * 1000) + ARROW_VERSION_MINOR) * 1000 + ARROW_VERSION_PATCH

#define ARROW_SO_VERSION ""
#define ARROW_FULL_SO_VERSION ""

/* #undef GRPCPP_PP_INCLUDE */
@@ -22,8 +22,8 @@
#define PARQUET_VERSION_MINOR 5
#define PARQUET_VERSION_PATCH 1

#define PARQUET_SO_VERSION 0
#define PARQUET_FULL_SO_VERSION 0.17
#define PARQUET_SO_VERSION "200"
#define PARQUET_FULL_SO_VERSION "200.0.0"

// define the parquet created by version
#define CREATED_BY_VERSION "parquet-cpp version 1.5.1-SNAPSHOT"
@@ -54,6 +54,26 @@ else ()
set(CARES_SHARED ON CACHE BOOL "" FORCE)
endif ()

# Disable looking for libnsl on a platforms that has gethostbyname in glibc
#
# c-ares searching for gethostbyname in the libnsl library, however in the
# version that shipped with gRPC it doing it wrong [1], since it uses
# CHECK_LIBRARY_EXISTS(), which will return TRUE even if the function exists in
# another dependent library. The upstream already contains correct macro [2],
# but it is not included in gRPC (even upstream gRPC, not the one that is
# shipped with clickhousee).
#
# [1]: https://github.com/c-ares/c-ares/blob/e982924acee7f7313b4baa4ee5ec000c5e373c30/CMakeLists.txt#L125
# [2]: https://github.com/c-ares/c-ares/blob/44fbc813685a1fa8aa3f27fcd7544faf612d376a/CMakeLists.txt#L146
#
# And because if you by some reason have libnsl [3] installed, clickhouse will
# reject to start w/o it. While this is completelly different library.
#
# [3]: https://packages.debian.org/bullseye/libnsl2
if (NOT CMAKE_SYSTEM_NAME STREQUAL "SunOS")
set(HAVE_LIBNSL OFF CACHE BOOL "" FORCE)
endif()

# We don't want to build C# extensions.
set(gRPC_BUILD_CSHARP_EXT OFF)
contrib/jemalloc (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 93e27e435cac846028da20cd9b0841fbc9110bd2
Subproject commit e6891d9746143bf2cf617493d880ba5a0b9a3efd
debian/clickhouse-test.install (vendored, 3 changes)
@@ -1,5 +1,2 @@
usr/bin/clickhouse-test
usr/bin/clickhouse-test-server
usr/share/clickhouse-test/*
etc/clickhouse-client/client-test.xml
etc/clickhouse-server/server-test.xml
debian/rules (vendored, 4 changes)
@@ -62,7 +62,7 @@ ifndef DISABLE_NINJA
NINJA=$(shell which ninja)
ifneq ($(NINJA),)
CMAKE_FLAGS += -GNinja
export MAKE=$(NINJA)
export MAKE=$(NINJA) $(NINJA_FLAGS)
endif
endif

@@ -93,7 +93,7 @@ override_dh_auto_build:

override_dh_auto_test:
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
cd $(BUILDDIR) && ctest $(THREADS_COUNT) -V -E with_server
cd $(BUILDDIR) && ctest $(THREADS_COUNT) -V
endif

override_dh_clean:
@@ -47,13 +47,13 @@ cp "${DOCKER_BUILD_FOLDER}/entrypoint.alpine.sh" "${CONTAINER_ROOT_FOLDER}/
## get glibc components from ubuntu 20.04 and put them to expected place
docker pull ubuntu:20.04
ubuntu20image=$(docker create --rm ubuntu:20.04)
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L ${ubuntu20image}:/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libc.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libdl.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libm.so.6 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libpthread.so.0 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/librt.so.1 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libnss_dns.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib/x86_64-linux-gnu/libresolv.so.2 "${CONTAINER_ROOT_FOLDER}/lib"
docker cp -L "${ubuntu20image}":/lib64/ld-linux-x86-64.so.2 "${CONTAINER_ROOT_FOLDER}/lib64"

docker build "$DOCKER_BUILD_FOLDER" -f Dockerfile.alpine -t "yandex/clickhouse-server:${VERSION}-alpine" --pull
@@ -26,17 +26,17 @@ fi
CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"

# port is needed to check if clickhouse-server is ready for connections
HTTP_PORT="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=http_port)"
HTTP_PORT="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=http_port)"

# get CH directories locations
DATA_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.log || true)"
LOG_DIR="$(dirname $LOG_PATH || true)"
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname $ERROR_LOG_PATH || true)"
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file $CLICKHOUSE_CONFIG --key=format_schema_path || true)"
DATA_DIR="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=logger.log || true)"
LOG_DIR="$(dirname "${LOG_PATH}" || true)"
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname "${ERROR_LOG_PATH}" || true)"
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "${CLICKHOUSE_CONFIG}" --key=format_schema_path || true)"

CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"
CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-}"
@@ -92,7 +92,7 @@ fi

if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
# Listen only on localhost until the initialization is done
$gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG -- --listen_host=127.0.0.1 &
$gosu /usr/bin/clickhouse-server --config-file="${CLICKHOUSE_CONFIG}" -- --listen_host=127.0.0.1 &
pid="$!"

# check if clickhouse is ready to accept connections
@@ -107,7 +107,7 @@ if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
sleep 1
done

if [ ! -z "$CLICKHOUSE_PASSWORD" ]; then
if [ -n "$CLICKHOUSE_PASSWORD" ]; then
printf -v WITH_PASSWORD '%s %q' "--password" "$CLICKHOUSE_PASSWORD"
fi

@@ -130,7 +130,7 @@ if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
. "$f"
fi
;;
*.sql) echo "$0: running $f"; cat "$f" | "$clickhouseclient" ; echo ;;
*.sql) echo "$0: running $f"; "$clickhouseclient" < "$f" ; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "$clickhouseclient"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
@@ -145,7 +145,7 @@ fi

# if no args passed to `docker run` or first argument start with `--`, then the user is passing clickhouse-server arguments
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
exec $gosu /usr/bin/clickhouse-server --config-file=$CLICKHOUSE_CONFIG "$@"
exec $gosu /usr/bin/clickhouse-server --config-file="${CLICKHOUSE_CONFIG}" "$@"
fi

# Otherwise, we assume the user want to run his own process, for example a `bash` shell to explore this image
@@ -12,7 +12,32 @@ dpkg -i package_folder/clickhouse-test_*.deb
# install test configs
/usr/share/clickhouse-test/config/install.sh

service clickhouse-server start && sleep 5
# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
export THREAD_FUZZER_SLEEP_TIME_US=100000

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000

# simpliest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
sleep 5
else
service clickhouse-server start && sleep 5
fi

if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
SKIP_LIST_OPT="--use-skip-list"
docs/_description_templates/template-server-setting.md (new file, 33 additions)
@@ -0,0 +1,33 @@
## server_setting_name {#server_setting_name}

Description.

Describe what is configured in this section of settings.

Possible value: ...

Default value: ...

Settings: (Optional)

If the section contains several settings, list them here. Specify possible values and default values:

- setting_1 — Description.
- setting_2 — Description.

**Example:**

```xml
<server_setting_name>
    <setting_1> ... </setting_1>
    <setting_2> ... </setting_2>
</server_setting_name>
```

**Additional Info** (Optional)

The name of an additional section can be any, for example, **Usage**.

**See Also** (Optional)

- [link](#)
@@ -2447,7 +2447,6 @@ Result:
{"number":"2"}
```

=======
## allow_nullable_key {#allow-nullable-key}

Allows using of the [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable)-typed values in a sorting and a primary key for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) tables.
@@ -29,12 +29,12 @@ These actions are described in detail below.
## ADD COLUMN {#alter_add-column}

``` sql
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
```

Adds a new column to the table with the specified `name`, `type`, [`codec`](../../../sql-reference/statements/create/table.md#codecs) and `default_expr` (see the section [Default expressions](../../../sql-reference/statements/create/table.md#create-default-values)).

If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. Otherwise, the column is added to the end of the table. Note that there is no way to add a column to the beginning of a table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.
If the `IF NOT EXISTS` clause is included, the query won’t return an error if the column already exists. If you specify `AFTER name_after` (the name of another column), the column is added after the specified one in the list of table columns. If you want to add a column to the beginning of the table use the `FIRST` clause. Otherwise, the column is added to the end of the table. For a chain of actions, `name_after` can be the name of a column that is added in one of the previous actions.

Adding a column just changes the table structure, without performing any actions with data. The data doesn’t appear on the disk after `ALTER`. If the data is missing for a column when reading from the table, it is filled in with default values (by performing the default expression if there is one, or using zeros or empty strings). The column appears on the disk after merging data parts (see [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)).

@@ -43,9 +43,24 @@ This approach allows us to complete the `ALTER` query instantly, without increas
Example:

``` sql
ALTER TABLE visits ADD COLUMN browser String AFTER user_id
ALTER TABLE alter_test ADD COLUMN Added1 UInt32 FIRST;
ALTER TABLE alter_test ADD COLUMN Added2 UInt32 AFTER NestedColumn;
ALTER TABLE alter_test ADD COLUMN Added3 UInt32 AFTER ToDrop;
DESC alter_test FORMAT TSV;
```

``` text
Added1 UInt32
CounterID UInt32
StartDate Date
UserID UInt32
VisitID UInt32
NestedColumn.A Array(UInt8)
NestedColumn.S Array(String)
Added2 UInt32
ToDrop UInt32
Added3 UInt32
```
## DROP COLUMN {#alter_drop-column}

``` sql
@@ -99,7 +114,7 @@ ALTER TABLE visits COMMENT COLUMN browser 'The table shows the browser used for
## MODIFY COLUMN {#alter_modify-column}

``` sql
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | FIRST]
```

This query changes the `name` column properties:
@@ -114,6 +129,8 @@ This query changes the `name` column properties:

If the `IF EXISTS` clause is specified, the query won’t return an error if the column doesn’t exist.

The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description.

When changing the type, values are converted as if the [toType](../../../sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query doesn’t do anything complex, and is completed almost instantly.

Example:
@@ -124,15 +141,7 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)

Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.

There are several processing stages:

- Preparing temporary (new) files with modified data.
- Renaming old files.
- Renaming the temporary (new) files to the old names.
- Deleting the old files.

Only the first stage takes time. If there is a failure at this stage, the data is not changed.
If there is a failure during one of the successive stages, data can be restored manually. The exception is if the old files were deleted from the file system but the data for the new files did not get written to the disk and was lost.
The `ALTER` query is atomic. For MergeTree tables it is also lock-free.

The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.
@ -9,7 +9,6 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT
|
||||
|
||||
Поддерживаемые форматы и возможность использовать их в запросах `INSERT` и `SELECT` перечислены в таблице ниже.
|
||||
|
||||
=======
|
||||
| Формат | INSERT | SELECT |
|
||||
|-----------------------------------------------------------------------------------------|--------|--------|
|
||||
| [TabSeparated](#tabseparated) | ✔ | ✔ |
|
||||
|
@ -18,12 +18,12 @@ toc_title: "\u041c\u0430\u043d\u0438\u043f\u0443\u043b\u044f\u0446\u0438\u0438\u
|
||||
## ADD COLUMN {#alter_add-column}
|
||||
|
||||
``` sql
|
||||
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
|
||||
ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after | FIRST]
|
||||
```
|
||||
|
||||
Добавляет в таблицу новый столбец с именем `name`, типом `type`, [кодеком](../create/table.md#codecs) `codec` и выражением для умолчания `default_expr` (смотрите раздел [Значения по умолчанию](../create/index.md#create-default-values)).
|
||||
|
||||
Если указано `IF NOT EXISTS`, запрос не будет возвращать ошибку, если столбец уже существует. Если указано `AFTER name_after` (имя другого столбца), то столбец добавляется (в список столбцов таблицы) после указанного. Иначе, столбец добавляется в конец таблицы. Обратите внимание, ClickHouse не позволяет добавлять столбцы в начало таблицы. Для цепочки действий, `name_after` может быть именем столбца, который добавляется в одном из предыдущих действий.
|
||||
Если указано `IF NOT EXISTS`, запрос не будет возвращать ошибку, если столбец уже существует. Если указано `AFTER name_after` (имя другого столбца), то столбец добавляется (в список столбцов таблицы) после указанного. Если вы хотите добавить столбец в начало таблицы, используйте `FIRST`. Иначе столбец добавляется в конец таблицы. Для цепочки действий `name_after` может быть именем столбца, который добавляется в одном из предыдущих действий.
|
||||
|
||||
Добавление столбца всего лишь меняет структуру таблицы, и не производит никаких действий с данными - соответствующие данные не появляются на диске после ALTER-а. При чтении из таблицы, если для какого-либо столбца отсутствуют данные, то он заполняется значениями по умолчанию (выполняя выражение по умолчанию, если такое есть, или нулями, пустыми строками). Также, столбец появляется на диске при слиянии кусков данных (см. [MergeTree](../../../sql-reference/statements/alter/index.md)).
|
||||
|
||||
@ -32,7 +32,23 @@ ADD COLUMN [IF NOT EXISTS] name [type] [default_expr] [codec] [AFTER name_after]
|
||||
Пример:
|
||||
|
||||
``` sql
|
||||
ALTER TABLE visits ADD COLUMN browser String AFTER user_id
|
||||
ALTER TABLE alter_test ADD COLUMN Added1 UInt32 FIRST;
|
||||
ALTER TABLE alter_test ADD COLUMN Added2 UInt32 AFTER NestedColumn;
|
||||
ALTER TABLE alter_test ADD COLUMN Added3 UInt32 AFTER ToDrop;
|
||||
DESC alter_test FORMAT TSV;
|
||||
```
|
||||
|
||||
``` text
|
||||
Added1 UInt32
|
||||
CounterID UInt32
|
||||
StartDate Date
|
||||
UserID UInt32
|
||||
VisitID UInt32
|
||||
NestedColumn.A Array(UInt8)
|
||||
NestedColumn.S Array(String)
|
||||
Added2 UInt32
|
||||
ToDrop UInt32
|
||||
Added3 UInt32
|
||||
```
|
||||
|
||||
## DROP COLUMN {#alter_drop-column}
|
||||
@ -88,7 +104,7 @@ ALTER TABLE visits COMMENT COLUMN browser 'Столбец показывает,
|
||||
## MODIFY COLUMN {#alter_modify-column}
|
||||
|
||||
``` sql
|
||||
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
|
||||
MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL] [AFTER name_after | FIRST]
|
||||
```
|
||||
|
||||
Запрос изменяет следующие свойства столбца `name`:
|
||||
@ -103,6 +119,8 @@ MODIFY COLUMN [IF EXISTS] name [type] [default_expr] [TTL]
|
||||
|
||||
Если указано `IF EXISTS`, запрос не возвращает ошибку, если столбца не существует.
|
||||
|
||||
Запрос также может изменять порядок столбцов при помощи `FIRST | AFTER`, смотрите описание [ADD COLUMN](#alter_add-column).
|
||||
|
||||
При изменении типа, значения преобразуются так, как если бы к ним была применена функция [toType](../../../sql-reference/statements/alter/index.md). Если изменяется только выражение для умолчания, запрос не делает никакой сложной работы и выполняется мгновенно.
|
||||
|
||||
Пример запроса:
|
||||
@ -113,15 +131,7 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)
|
||||
|
||||
Изменение типа столбца - это единственное действие, которое выполняет сложную работу - меняет содержимое файлов с данными. Для больших таблиц, выполнение может занять длительное время.
|
||||
|
||||
Выполнение производится в несколько стадий:
|
||||
|
||||
- подготовка временных (новых) файлов с изменёнными данными;
|
||||
- переименование старых файлов;
|
||||
- переименование временных (новых) файлов в старые;
|
||||
- удаление старых файлов.
|
||||
|
||||
Из них, длительной является только первая стадия. Если на этой стадии возникнет сбой, то данные не поменяются.
|
||||
Если на одной из следующих стадий возникнет сбой, то данные будет можно восстановить вручную. За исключением случаев, когда старые файлы удалены из файловой системы, а данные для новых файлов не доехали на диск и потеряны.
|
||||
Выполнение запроса ALTER атомарно.
|
||||
|
||||
Запрос `ALTER` на изменение столбцов реплицируется. Соответствующие инструкции сохраняются в ZooKeeper, и затем каждая реплика их применяет. Все запросы `ALTER` выполняются в одном и том же порядке. Запрос ждёт выполнения соответствующих действий на всех репликах. Но при этом, запрос на изменение столбцов в реплицируемой таблице можно прервать, и все действия будут осуществлены асинхронно.
|
||||
|
||||
|
@@ -28,8 +28,8 @@ Follow the instructions on it's official website: <https://wkhtmltopdf.org/downl

#### 2. Install CLI tools from npm

1. `apt-get install npm` for Debian/Ubuntu or `brew install npm` on Mac OS X.
2. `npm install -g purifycss amphtml-validator`.
1. `sudo apt-get install npm` for Debian/Ubuntu or `brew install npm` on Mac OS X.
2. `sudo npm install -g purify-css amphtml-validator`.

#### 3. Set up virtualenv

@ -48,11 +48,6 @@ def build_for_lang(lang, args):
|
||||
logging.info(f'Building {lang} docs')
|
||||
os.environ['SINGLE_PAGE'] = '0'
|
||||
|
||||
config_path = os.path.join(args.docs_dir, f'toc_{lang}.yml')
|
||||
if args.is_stable_release and not os.path.exists(config_path):
|
||||
logging.warning(f'Skipping {lang} docs, because {config} does not exist')
|
||||
return
|
||||
|
||||
try:
|
||||
theme_cfg = {
|
||||
'name': None,
|
||||
@ -73,9 +68,7 @@ def build_for_lang(lang, args):
|
||||
'es': 'Español',
|
||||
'fr': 'Français',
|
||||
'ru': 'Русский',
|
||||
'ja': '日本語',
|
||||
'tr': 'Türkçe',
|
||||
'fa': 'فارسی'
|
||||
'ja': '日本語'
|
||||
}
|
||||
|
||||
site_names = {
|
||||
@ -84,31 +77,26 @@ def build_for_lang(lang, args):
|
||||
'es': 'Documentación de ClickHouse %s',
|
||||
'fr': 'Documentation ClickHouse %s',
|
||||
'ru': 'Документация ClickHouse %s',
|
||||
'ja': 'ClickHouseドキュメント %s',
|
||||
'tr': 'ClickHouse Belgeleri %s',
|
||||
'fa': 'مستندات %sClickHouse'
|
||||
'ja': 'ClickHouseドキュメント %s'
|
||||
}
|
||||
|
||||
assert len(site_names) == len(languages)
|
||||
|
||||
if args.version_prefix:
|
||||
site_dir = os.path.join(args.docs_output_dir, args.version_prefix, lang)
|
||||
else:
|
||||
site_dir = os.path.join(args.docs_output_dir, lang)
|
||||
site_dir = os.path.join(args.docs_output_dir, lang)
|
||||
|
||||
plugins = ['macros']
|
||||
if args.htmlproofer:
|
||||
plugins.append('htmlproofer')
|
||||
|
||||
website_url = 'https://clickhouse.tech'
|
||||
site_name = site_names.get(lang, site_names['en']) % args.version_prefix
|
||||
site_name = site_names.get(lang, site_names['en']) % ''
|
||||
site_name = site_name.replace(' ', ' ')
|
||||
raw_config = dict(
|
||||
site_name=site_name,
|
||||
site_url=f'{website_url}/docs/{lang}/',
|
||||
docs_dir=os.path.join(args.docs_dir, lang),
|
||||
site_dir=site_dir,
|
||||
strict=not args.version_prefix,
|
||||
strict=True,
|
||||
theme=theme_cfg,
|
||||
copyright='©2016–2020 Yandex LLC',
|
||||
use_directory_urls=True,
|
||||
@ -119,8 +107,6 @@ def build_for_lang(lang, args):
|
||||
plugins=plugins,
|
||||
extra=dict(
|
||||
now=datetime.datetime.now().isoformat(),
|
||||
stable_releases=args.stable_releases,
|
||||
version_prefix=args.version_prefix,
|
||||
single_page=False,
|
||||
rev=args.rev,
|
||||
rev_short=args.rev_short,
|
||||
@ -134,23 +120,14 @@ def build_for_lang(lang, args):
|
||||
)
|
||||
)
|
||||
|
||||
if os.path.exists(config_path):
|
||||
raw_config['config_file'] = config_path
|
||||
else:
|
||||
raw_config['nav'] = nav.build_docs_nav(lang, args)
|
||||
raw_config['nav'] = nav.build_docs_nav(lang, args)
|
||||
|
||||
cfg = config.load_config(**raw_config)
|
||||
|
||||
if not args.skip_multi_page:
|
||||
try:
|
||||
mkdocs.commands.build.build(cfg)
|
||||
except jinja2.exceptions.TemplateError:
|
||||
if not args.version_prefix:
|
||||
raise
|
||||
mdx_clickhouse.PatchedMacrosPlugin.disabled = True
|
||||
mkdocs.commands.build.build(cfg)
|
||||
mkdocs.commands.build.build(cfg)
|
||||
|
||||
if not (args.skip_amp or args.version_prefix):
|
||||
if not args.skip_amp:
|
||||
amp.build_amp(lang, args, cfg)
|
||||
|
||||
if not args.skip_single_page:
|
||||
@ -170,8 +147,7 @@ def build_docs(args):
|
||||
if lang:
|
||||
tasks.append((lang, args,))
|
||||
util.run_function_in_parallel(build_for_lang, tasks, threads=False)
|
||||
if not args.version_prefix:
|
||||
redirects.build_docs_redirects(args)
|
||||
redirects.build_docs_redirects(args)
|
||||
|
||||
|
||||
def build(args):
|
||||
@ -188,8 +164,6 @@ def build(args):
|
||||
generate_cmake_flags_files()
|
||||
|
||||
build_docs(args)
|
||||
from github import build_releases
|
||||
build_releases(args, build_docs)
|
||||
|
||||
if not args.skip_blog:
|
||||
blog.build_blog(args)
|
||||
@ -209,7 +183,7 @@ if __name__ == '__main__':
|
||||
website_dir = os.path.join(src_dir, 'website')
|
||||
|
||||
arg_parser = argparse.ArgumentParser()
|
||||
arg_parser.add_argument('--lang', default='en,es,fr,ru,zh,ja,tr,fa')
|
||||
arg_parser.add_argument('--lang', default='en,es,fr,ru,zh,ja')
|
||||
arg_parser.add_argument('--blog-lang', default='en,ru')
|
||||
arg_parser.add_argument('--docs-dir', default='.')
|
||||
arg_parser.add_argument('--theme-dir', default=website_dir)
|
||||
@ -217,12 +191,7 @@ if __name__ == '__main__':
|
||||
arg_parser.add_argument('--src-dir', default=src_dir)
|
||||
arg_parser.add_argument('--blog-dir', default=os.path.join(website_dir, 'blog'))
|
||||
arg_parser.add_argument('--output-dir', default='build')
|
||||
arg_parser.add_argument('--enable-stable-releases', action='store_true')
|
||||
arg_parser.add_argument('--stable-releases-limit', type=int, default='3')
|
||||
arg_parser.add_argument('--lts-releases-limit', type=int, default='2')
|
||||
arg_parser.add_argument('--nav-limit', type=int, default='0')
|
||||
arg_parser.add_argument('--version-prefix', type=str, default='')
|
||||
arg_parser.add_argument('--is-stable-release', action='store_true')
|
||||
arg_parser.add_argument('--skip-multi-page', action='store_true')
|
||||
arg_parser.add_argument('--skip-single-page', action='store_true')
|
||||
arg_parser.add_argument('--skip-amp', action='store_true')
|
||||
@ -252,8 +221,7 @@ if __name__ == '__main__':
|
||||
args.docs_output_dir = os.path.join(os.path.abspath(args.output_dir), 'docs')
|
||||
args.blog_output_dir = os.path.join(os.path.abspath(args.output_dir), 'blog')
|
||||
|
||||
from github import choose_latest_releases, get_events
|
||||
args.stable_releases = choose_latest_releases(args) if args.enable_stable_releases else []
|
||||
from github import get_events
|
||||
args.rev = subprocess.check_output('git rev-parse HEAD', shell=True).decode('utf-8').strip()
|
||||
args.rev_short = subprocess.check_output('git rev-parse --short HEAD', shell=True).decode('utf-8').strip()
|
||||
args.rev_url = f'https://github.com/ClickHouse/ClickHouse/commit/{args.rev}'
|
||||
|
@ -13,88 +13,6 @@ import requests
|
||||
import util
|
||||
|
||||
|
||||
def yield_candidates():
|
||||
for page in range(1, 100):
|
||||
url = f'https://api.github.com/repos/ClickHouse/ClickHouse/tags?per_page=100&page={page}'
|
||||
github_token = os.getenv('GITHUB_TOKEN')
|
||||
if github_token:
|
||||
headers = {'authorization': f'OAuth {github_token}'}
|
||||
else:
|
||||
headers = {}
|
||||
for candidate in requests.get(url, headers=headers).json():
|
||||
yield candidate
|
||||
time.sleep(random.random() * 3)
|
||||
|
||||
|
||||
def choose_latest_releases(args):
|
||||
logging.info('Collecting release candidates')
|
||||
seen_stable = collections.OrderedDict()
|
||||
seen_lts = collections.OrderedDict()
|
||||
candidates = []
|
||||
stable_count = 0
|
||||
lts_count = 0
|
||||
|
||||
for tag in yield_candidates():
|
||||
if isinstance(tag, dict):
|
||||
name = tag.get('name', '')
|
||||
is_stable = 'stable' in name
|
||||
is_lts = 'lts' in name
|
||||
is_unstable = not (is_stable or is_lts)
|
||||
is_in_blacklist = ('v18' in name) or ('prestable' in name) or ('v1.1' in name)
|
||||
if is_unstable or is_in_blacklist:
|
||||
continue
|
||||
major_version = '.'.join((name.split('.', 2))[:2])
|
||||
if major_version not in seen_lts:
|
||||
if (stable_count >= args.stable_releases_limit) and (lts_count >= args.lts_releases_limit):
|
||||
break
|
||||
|
||||
payload = (name, tag.get('tarball_url'), is_lts,)
|
||||
logging.debug(payload)
|
||||
if is_lts:
|
||||
if lts_count < args.lts_releases_limit:
|
||||
seen_lts[major_version] = payload
|
||||
try:
|
||||
del seen_stable[major_version]
|
||||
except KeyError:
|
||||
pass
|
||||
lts_count += 1
|
||||
else:
|
||||
if stable_count < args.stable_releases_limit:
|
||||
if major_version not in seen_stable:
|
||||
seen_stable[major_version] = payload
|
||||
stable_count += 1
|
||||
|
||||
logging.debug(
|
||||
f'Stables: {stable_count}/{args.stable_releases_limit} LTS: {lts_count}/{args.lts_releases_limit}'
|
||||
)
|
||||
else:
|
||||
logging.fatal('Unexpected GitHub response: %s', str(candidates))
|
||||
sys.exit(1)
|
||||
|
||||
logging.info('Found LTS releases: %s', ', '.join(list(seen_lts.keys())))
|
||||
logging.info('Found stable releases: %s', ', '.join(list(seen_stable.keys())))
|
||||
return sorted(list(seen_lts.items()) + list(seen_stable.items()))
|
||||
|
||||
|
||||
def process_release(args, callback, release):
|
||||
name, (full_name, tarball_url, is_lts,) = release
|
||||
logging.info(f'Building docs for {full_name}')
|
||||
buf = io.BytesIO(requests.get(tarball_url).content)
|
||||
tar = tarfile.open(mode='r:gz', fileobj=buf)
|
||||
with util.temp_dir() as base_dir:
|
||||
tar.extractall(base_dir)
|
||||
args = copy.copy(args)
|
||||
args.version_prefix = name
|
||||
args.is_stable_release = True
|
||||
args.docs_dir = os.path.join(base_dir, os.listdir(base_dir)[0], 'docs')
|
||||
callback(args)
|
||||
|
||||
|
||||
def build_releases(args, callback):
|
||||
for release in args.stable_releases:
|
||||
process_release(args, callback, release)
|
||||
|
||||
|
||||
def get_events(args):
|
||||
events = []
|
||||
skip = True
|
||||
@ -118,12 +36,7 @@ def get_events(args):
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
class DummyArgs(object):
|
||||
lts_releases_limit = 1
|
||||
stable_releases_limit = 3
|
||||
logging.basicConfig(
|
||||
level=logging.DEBUG,
|
||||
stream=sys.stderr
|
||||
)
|
||||
for item in choose_latest_releases(DummyArgs()):
|
||||
print(item)
|
||||
|
@ -145,24 +145,9 @@ class PatchedMacrosPlugin(macros.plugin.MacrosPlugin):
|
||||
if self.skip_git_log:
|
||||
return markdown
|
||||
src_path = page.file.abs_src_path
|
||||
try:
|
||||
git_log = subprocess.check_output(f'git log --follow --date=iso8601 "{src_path}"', shell=True)
|
||||
except subprocess.CalledProcessError:
|
||||
return markdown
|
||||
max_date = None
|
||||
min_date = None
|
||||
for line in git_log.decode('utf-8').split('\n'):
|
||||
if line.startswith('Date:'):
|
||||
line = line.replace('Date:', '').strip().replace(' ', 'T', 1).replace(' ', '')
|
||||
current_date = datetime.datetime.fromisoformat(line[:-2] + ':' + line[-2:])
|
||||
if (not max_date) or current_date > max_date:
|
||||
max_date = current_date
|
||||
if (not min_date) or current_date < min_date:
|
||||
min_date = current_date
|
||||
if min_date:
|
||||
page.meta['published_date'] = min_date
|
||||
if max_date:
|
||||
page.meta['modified_date'] = max_date
|
||||
|
||||
# There was a code that determined the minimum and maximum modification dates for a page.
|
||||
# It was removed due to being obnoxiously slow.
|
||||
return markdown
|
||||
|
||||
def render_impl(self, markdown):
|
||||
|
@ -30,9 +30,8 @@ def build_redirect_html(args, base_prefix, lang, output_dir, from_path, to_path)
|
||||
output_dir, lang,
|
||||
from_path.replace('/index.md', '/index.html').replace('.md', '/index.html')
|
||||
)
|
||||
version_prefix = f'/{args.version_prefix}/' if args.version_prefix else '/'
|
||||
target_path = to_path.replace('/index.md', '/').replace('.md', '/')
|
||||
to_url = f'/{base_prefix}{version_prefix}{lang}/{target_path}'
|
||||
to_url = f'/{base_prefix}/{lang}/{target_path}'
|
||||
to_url = to_url.strip()
|
||||
write_redirect_html(out_path, to_url)
|
||||
|
||||
|
@ -7,15 +7,14 @@ PUBLISH_DIR="${BASE_DIR}/../publish"
|
||||
BASE_DOMAIN="${BASE_DOMAIN:-content.clickhouse.tech}"
|
||||
GIT_TEST_URI="${GIT_TEST_URI:-git@github.com:ClickHouse/clickhouse-website-content.git}"
|
||||
GIT_PROD_URI="git@github.com:ClickHouse/clickhouse-website-content.git"
|
||||
EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---enable-stable-releases --minify --verbose}"
|
||||
HISTORY_SIZE="${HISTORY_SIZE:-5}"
|
||||
EXTRA_BUILD_ARGS="${EXTRA_BUILD_ARGS:---minify --verbose}"
|
||||
|
||||
if [[ -z "$1" ]]
|
||||
then
|
||||
source "${BASE_DIR}/venv/bin/activate"
|
||||
python3 "${BASE_DIR}/build.py" ${EXTRA_BUILD_ARGS}
|
||||
rm -rf "${PUBLISH_DIR}" || true
|
||||
cd "${PUBLISH_DIR}"
|
||||
rm -rf "${PUBLISH_DIR}"
|
||||
mkdir "${PUBLISH_DIR}" && cd "${PUBLISH_DIR}"
|
||||
|
||||
# Will make a repository with website content as the only commit.
|
||||
git init
|
||||
@ -33,7 +32,7 @@ then
|
||||
git add ".nojekyll"
|
||||
|
||||
# Push to GitHub rewriting the existing contents.
|
||||
git commit -a -m "Add new release at $(date)"
|
||||
git commit --quiet -m "Add new release at $(date)"
|
||||
git push --force origin master
|
||||
|
||||
if [[ ! -z "${CLOUDFLARE_TOKEN}" ]]
|
||||
|
@ -111,10 +111,7 @@ def build_single_page_version(lang, args, nav, cfg):
|
||||
if not args.test_only:
|
||||
mkdocs.commands.build.build(cfg)
|
||||
|
||||
if args.version_prefix:
|
||||
single_page_output_path = os.path.join(args.docs_dir, args.docs_output_dir, args.version_prefix, lang, 'single')
|
||||
else:
|
||||
single_page_output_path = os.path.join(args.docs_dir, args.docs_output_dir, lang, 'single')
|
||||
single_page_output_path = os.path.join(args.docs_dir, args.docs_output_dir, lang, 'single')
|
||||
|
||||
if os.path.exists(single_page_output_path):
|
||||
shutil.rmtree(single_page_output_path)
|
||||
@ -157,10 +154,9 @@ def build_single_page_version(lang, args, nav, cfg):
|
||||
if args.save_raw_single_page:
|
||||
shutil.copytree(test_dir, args.save_raw_single_page)
|
||||
|
||||
if not args.version_prefix: # maybe enable in future
|
||||
logging.info(f'Running tests for {lang}')
|
||||
test.test_single_page(
|
||||
os.path.join(test_dir, 'single', 'index.html'), lang)
|
||||
logging.info(f'Running tests for {lang}')
|
||||
test.test_single_page(
|
||||
os.path.join(test_dir, 'single', 'index.html'), lang)
|
||||
|
||||
if not args.skip_pdf:
|
||||
single_page_index_html = os.path.join(test_dir, 'single', 'index.html')
|
||||
|
@ -11,8 +11,6 @@ import googletrans
|
||||
import requests
|
||||
import yaml
|
||||
|
||||
import typograph_ru
|
||||
|
||||
|
||||
translator = googletrans.Translator()
|
||||
default_target_language = os.environ.get('TARGET_LANGUAGE', 'ru')
|
||||
@ -25,8 +23,6 @@ def translate_impl(text, target_language=None):
|
||||
target_language = target_language or default_target_language
|
||||
if target_language == 'en':
|
||||
return text
|
||||
elif target_language == 'typograph_ru':
|
||||
return typograph_ru.typograph(text)
|
||||
elif is_yandex:
|
||||
text = text.replace('‘', '\'')
|
||||
text = text.replace('’', '\'')
|
||||
@ -59,25 +55,10 @@ def translate(text, target_language=None):
|
||||
)
|
||||
|
||||
|
||||
def translate_toc(root, lang):
|
||||
global is_yandex
|
||||
is_yandex = True
|
||||
if isinstance(root, dict):
|
||||
result = []
|
||||
for key, value in root.items():
|
||||
key = translate(key, lang) if key != 'hidden' and not key.isupper() else key
|
||||
result.append((key, translate_toc(value, lang),))
|
||||
return dict(result)
|
||||
elif isinstance(root, list):
|
||||
return [translate_toc(item, lang) for item in root]
|
||||
elif isinstance(root, str):
|
||||
return root
|
||||
|
||||
|
||||
def translate_po():
|
||||
import babel.messages.pofile
|
||||
base_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'website', 'locale')
|
||||
for lang in ['en', 'zh', 'es', 'fr', 'ru', 'ja', 'tr', 'fa']:
|
||||
for lang in ['en', 'zh', 'es', 'fr', 'ru', 'ja']:
|
||||
po_path = os.path.join(base_dir, lang, 'LC_MESSAGES', 'messages.po')
|
||||
with open(po_path, 'r') as f:
|
||||
po_file = babel.messages.pofile.read_po(f, locale=lang, domain='messages')
|
||||
|
@ -232,6 +232,7 @@ def minify_website(args):
|
||||
f"'{args.output_dir}/docs/en/**/*.html' '{args.website_dir}/js/**/*.js' > {css_out}"
|
||||
else:
|
||||
command = f'cat {css_in} > {css_out}'
|
||||
|
||||
logging.info(command)
|
||||
output = subprocess.check_output(command, shell=True)
|
||||
logging.debug(output)
|
||||
|
@ -7,9 +7,9 @@ toc_title: DateTime64
|
||||
|
||||
# Datetime64 {#data_type-datetime64}
|
||||
|
||||
允许存储时间instant间,可以表示为日历日期和一天中的时间,具有定义的亚秒精度
|
||||
此类型允许以日期(date)加时间(time)的形式来存储一个时刻的时间值,具有定义的亚秒精度
|
||||
|
||||
刻度尺寸(精度):10<sup>-精度</sup> 秒
|
||||
时间刻度大小(精度):10<sup>-精度</sup> 秒
|
||||
|
||||
语法:
|
||||
|
||||
@ -17,11 +17,11 @@ toc_title: DateTime64
|
||||
DateTime64(precision, [timezone])
|
||||
```
|
||||
|
||||
在内部,存储数据作为一些 ‘ticks’ 自纪元开始(1970-01-01 00:00:00UTC)作为Int64. 刻度分辨率由precision参数确定。 此外,该 `DateTime64` 类型可以存储时区是相同的整个列,影响如何的值 `DateTime64` 类型值以文本格式显示,以及如何解析指定为字符串的值 (‘2020-01-01 05:00:01.000’). 时区不存储在表的行中(或resultset中),而是存储在列元数据中。 查看详细信息 [日期时间](datetime.md).
|
||||
在内部,此类型以Int64类型将数据存储为自Linux纪元开始(1970-01-01 00:00:00UTC)的时间刻度数(ticks)。时间刻度的分辨率由precision参数确定。此外,`DateTime64` 类型可以像存储其他数据列一样存储时区信息,时区会影响 `DateTime64` 类型的值如何以文本格式显示,以及如何解析以字符串形式指定的时间数据 (‘2020-01-01 05:00:01.000’)。时区不存储在表的行中(也不在resultset中),而是存储在列的元数据中。详细信息请参考 [DateTime](datetime.md) 数据类型.
|
||||
|
||||
## 例 {#examples}
|
||||
## 示例 {#examples}
|
||||
|
||||
**1.** 创建一个表 `DateTime64`-输入列并将数据插入其中:
|
||||
**1.** 创建一个具有 `DateTime64` 类型列的表,并向其中插入数据:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE dt
|
||||
@ -47,10 +47,10 @@ SELECT * FROM dt
|
||||
└─────────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
- 将日期时间作为整数插入时,将其视为适当缩放的Unix时间戳(UTC)。 `1546300800000` (精度为3)表示 `'2019-01-01 00:00:00'` UTC. 然而,作为 `timestamp` 列有 `Europe/Moscow` (UTC+3)指定的时区,当输出为字符串时,该值将显示为 `'2019-01-01 03:00:00'`
|
||||
- 当插入字符串值作为日期时间时,它被视为处于列时区。 `'2019-01-01 00:00:00'` 将被视为 `Europe/Moscow` 时区并存储为 `1546290000000`.
|
||||
- 将日期时间作为integer类型插入时,它会被视为适当缩放的Unix时间戳(UTC)。`1546300800000` (精度为3)表示 `'2019-01-01 00:00:00'` UTC. 不过,因为 `timestamp` 列指定了 `Europe/Moscow` (UTC+3)的时区,当作为字符串输出时,它将显示为 `'2019-01-01 03:00:00'`
|
||||
- 当把字符串作为日期时间插入时,它会被赋予时区信息。 `'2019-01-01 00:00:00'` 将被认为处于 `Europe/Moscow` 时区并被存储为 `1546290000000`.
|
||||
|
||||
**2.** 过滤 `DateTime64` 值
|
||||
**2.** 过滤 `DateTime64` 类型的值
|
||||
|
||||
``` sql
|
||||
SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europe/Moscow')
|
||||
@ -62,9 +62,9 @@ SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Europ
|
||||
└─────────────────────────┴──────────┘
|
||||
```
|
||||
|
||||
不像 `DateTime`, `DateTime64` 值不转换为 `String` 自动
|
||||
与 `DateTime` 不同, `DateTime64` 类型的值不会自动从 `String` 类型的值转换过来
|
||||
|
||||
**3.** 获取一个时区 `DateTime64`-类型值:
|
||||
**3.** 获取 `DateTime64` 类型值的时区信息:
|
||||
|
||||
``` sql
|
||||
SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS x
|
||||
@ -97,8 +97,9 @@ FROM dt
|
||||
- [类型转换函数](../../sql-reference/functions/type-conversion-functions.md)
|
||||
- [用于处理日期和时间的函数](../../sql-reference/functions/date-time-functions.md)
|
||||
- [用于处理数组的函数](../../sql-reference/functions/array-functions.md)
|
||||
- [该 `date_time_input_format` 设置](../../operations/settings/settings.md#settings-date_time_input_format)
|
||||
- [该 `timezone` 服务器配置参数](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
|
||||
- [使用日期和时间的操作员](../../sql-reference/operators/index.md#operators-datetime)
|
||||
- [`date_time_input_format` 配置](../../operations/settings/settings.md#settings-date_time_input_format)
|
||||
- [`date_time_output_format` 配置](../../operations/settings/settings.md#settings-date_time_output_format)
|
||||
- [`timezone` 服务器配置参数](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
|
||||
- [用于处理日期和时间的算子](../../sql-reference/operators/index.md#operators-datetime)
|
||||
- [`Date` 数据类型](date.md)
|
||||
- [`DateTime` 数据类型](datetime.md)
|
||||
|
@@ -10,6 +10,10 @@
#include <linux/capability.h>
#endif

#if defined(OS_DARWIN)
#include <mach-o/dyld.h>
#endif

#include <Common/Exception.h>
#include <Common/ShellCommand.h>
#include <Common/formatReadable.h>
@ -147,9 +151,24 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
||||
try
|
||||
{
|
||||
/// We need to copy binary to the binary directory.
|
||||
/// The binary is currently run. We need to obtain its path from procfs.
|
||||
/// The binary is currently run. We need to obtain its path from procfs (on Linux).
|
||||
|
||||
#if defined(OS_DARWIN)
|
||||
uint32_t path_length = 0;
|
||||
_NSGetExecutablePath(nullptr, &path_length);
|
||||
if (path_length <= 1)
|
||||
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
|
||||
|
||||
std::string path(path_length, std::string::value_type());
|
||||
auto res = _NSGetExecutablePath(&path[0], &path_length);
|
||||
if (res != 0)
|
||||
Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary");
|
||||
|
||||
fs::path binary_self_path(path);
|
||||
#else
|
||||
fs::path binary_self_path = "/proc/self/exe";
|
||||
#endif
|
||||
|
||||
if (!fs::exists(binary_self_path))
|
||||
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot obtain path to the binary from {}, file doesn't exist",
|
||||
binary_self_path.string());
|
||||
|
@ -308,53 +308,11 @@ void checkRequiredInstructions()
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __linux__
|
||||
/// clickhouse uses jemalloc as a production allocator
|
||||
/// and jemalloc relies on working MADV_DONTNEED,
|
||||
/// which doesn't work under qemu
|
||||
///
|
||||
/// but do this only under for linux, since only it return zeroed pages after MADV_DONTNEED
|
||||
/// (and jemalloc assumes this too, see contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h.in)
|
||||
void checkRequiredMadviseFlags()
|
||||
{
|
||||
size_t size = 1 << 16;
|
||||
void * addr = mmap(nullptr, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
|
||||
if (addr == MAP_FAILED)
|
||||
{
|
||||
writeError("Can not mmap pages for MADV_DONTNEED check\n");
|
||||
_Exit(1);
|
||||
}
|
||||
memset(addr, 'A', size);
|
||||
|
||||
if (!madvise(addr, size, MADV_DONTNEED))
|
||||
{
|
||||
/// Suboptimal, but should be simple.
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
if (reinterpret_cast<unsigned char *>(addr)[i] != 0)
|
||||
{
|
||||
writeError("MADV_DONTNEED does not zeroed page. jemalloc will be broken\n");
|
||||
_Exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (munmap(addr, size))
|
||||
{
|
||||
writeError("Can not munmap pages for MADV_DONTNEED check\n");
|
||||
_Exit(1);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
struct Checker
|
||||
{
|
||||
Checker()
|
||||
{
|
||||
checkRequiredInstructions();
|
||||
#ifdef __linux__
|
||||
checkRequiredMadviseFlags();
|
||||
#endif
|
||||
}
|
||||
} checker;
|
||||
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <sys/resource.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <errno.h>
|
||||
#include <pwd.h>
|
||||
#include <unistd.h>
|
||||
@ -103,6 +104,7 @@ namespace CurrentMetrics
|
||||
int mainEntryClickHouseServer(int argc, char ** argv)
|
||||
{
|
||||
DB::Server app;
|
||||
app.shouldSetupWatchdog(argc ? argv[0] : nullptr);
|
||||
try
|
||||
{
|
||||
return app.run(argc, argv);
|
||||
@ -366,6 +368,7 @@ void checkForUsersNotInMainConfig(
|
||||
int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
Poco::Logger * log = &logger();
|
||||
|
||||
UseSSL use_ssl;
|
||||
|
||||
MainThreadStatus::getInstance();
|
||||
@ -770,7 +773,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
http_params->setTimeout(settings.http_receive_timeout);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
||||
auto servers_to_start_before_tables = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
@ -792,7 +795,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers_to_start_before_tables.emplace_back(
|
||||
servers_to_start_before_tables->emplace_back(
|
||||
port_name,
|
||||
std::make_unique<Poco::Net::TCPServer>(
|
||||
new TestKeeperTCPHandlerFactory(*this), server_pool, socket, new Poco::Net::TCPServerParams));
|
||||
@ -801,7 +804,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
});
|
||||
}
|
||||
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
for (auto & server : *servers_to_start_before_tables)
|
||||
server.start();
|
||||
|
||||
SCOPE_EXIT({
|
||||
@ -816,11 +819,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
LOG_DEBUG(log, "Shut down storages.");
|
||||
|
||||
if (!servers_to_start_before_tables.empty())
|
||||
if (!servers_to_start_before_tables->empty())
|
||||
{
|
||||
LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
|
||||
int current_connections = 0;
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
for (auto & server : *servers_to_start_before_tables)
|
||||
{
|
||||
server.stop();
|
||||
current_connections += server.currentConnections();
|
||||
@ -832,7 +835,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "Closed all listening sockets.");
|
||||
|
||||
if (current_connections > 0)
|
||||
current_connections = waitServersToFinish(servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5));
|
||||
current_connections = waitServersToFinish(*servers_to_start_before_tables, config().getInt("shutdown_wait_unfinished", 5));
|
||||
|
||||
if (current_connections)
|
||||
LOG_INFO(log, "Closed connections to servers for tables. But {} remain. Probably some tables of other users cannot finish their connections after context shutdown.", current_connections);
|
||||
@ -978,7 +981,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");
|
||||
#endif
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
||||
{
|
||||
/// This object will periodically calculate some metrics.
|
||||
AsynchronousMetrics async_metrics(
|
||||
@ -996,7 +999,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
createHandlerFactory(*this, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
|
||||
|
||||
LOG_INFO(log, "Listening for http://{}", address.toString());
|
||||
@ -1011,7 +1014,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
createHandlerFactory(*this, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
||||
|
||||
LOG_INFO(log, "Listening for https://{}", address.toString());
|
||||
@ -1030,7 +1033,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -1047,7 +1050,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -1065,7 +1068,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -1086,7 +1089,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"), server_pool, socket, http_params));
|
||||
|
||||
LOG_INFO(log, "Listening for replica communication (interserver): http://{}", address.toString());
|
||||
@ -1100,7 +1103,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"), server_pool, socket, http_params));
|
||||
|
||||
LOG_INFO(log, "Listening for secure replica communication (interserver): https://{}", address.toString());
|
||||
@ -1118,7 +1121,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
new MySQLHandlerFactory(*this),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -1134,7 +1137,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::TCPServer>(
|
||||
new PostgreSQLHandlerFactory(*this),
|
||||
server_pool,
|
||||
socket,
|
||||
@ -1148,7 +1151,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
createServer(listen_host, port_name, listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::SocketAddress server_address(listen_host, port);
|
||||
servers.emplace_back(port_name, std::make_unique<GRPCServer>(*this, makeSocketAddress(listen_host, port, log)));
|
||||
servers->emplace_back(port_name, std::make_unique<GRPCServer>(*this, makeSocketAddress(listen_host, port, log)));
|
||||
LOG_INFO(log, "Listening for gRPC protocol: " + server_address.toString());
|
||||
});
|
||||
#endif
|
||||
@ -1161,14 +1164,14 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
servers->emplace_back(port_name, std::make_unique<Poco::Net::HTTPServer>(
|
||||
createHandlerFactory(*this, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||
|
||||
LOG_INFO(log, "Listening for Prometheus: http://{}", address.toString());
|
||||
});
|
||||
}
|
||||
|
||||
if (servers.empty())
|
||||
if (servers->empty())
|
||||
throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
|
||||
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
||||
|
||||
@ -1176,7 +1179,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
async_metrics.start();
|
||||
global_context->enableNamedSessions();
|
||||
|
||||
for (auto & server : servers)
|
||||
for (auto & server : *servers)
|
||||
server.start();
|
||||
|
||||
{
|
||||
@ -1208,7 +1211,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
is_cancelled = true;
|
||||
|
||||
int current_connections = 0;
|
||||
for (auto & server : servers)
|
||||
for (auto & server : *servers)
|
||||
{
|
||||
server.stop();
|
||||
current_connections += server.currentConnections();
|
||||
@ -1223,7 +1226,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
global_context->getProcessList().killAllQueries();
|
||||
|
||||
if (current_connections)
|
||||
current_connections = waitServersToFinish(servers, config().getInt("shutdown_wait_unfinished", 5));
|
||||
current_connections = waitServersToFinish(*servers, config().getInt("shutdown_wait_unfinished", 5));
|
||||
|
||||
if (current_connections)
|
||||
LOG_INFO(log, "Closed connections. But {} remain."
|
||||
|
@@ -363,7 +363,7 @@ endif ()

if (USE_PARQUET)
dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY})
if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
if (NOT USE_INTERNAL_PARQUET_LIBRARY)
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR})
if (USE_STATIC_LIBRARIES)
dbms_target_link_libraries(PRIVATE ${ARROW_LIBRARY})
@@ -8,9 +8,7 @@
*/
struct SimpleIncrement
{
std::atomic<UInt64> value;

SimpleIncrement(UInt64 start = 0) : value(start) {}
std::atomic<UInt64> value{0};

void set(UInt64 new_value)
{
@@ -10,6 +10,7 @@
#include <common/sleep.h>

#include <IO/ReadHelpers.h>
#include <common/logger_useful.h>

#include <Common/Exception.h>
#include <Common/thread_local_rng.h>
@@ -4,9 +4,6 @@ target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper)
add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper string_utils)

add_executable(zkutil_expiration_test zkutil_expiration_test.cpp)
target_link_libraries(zkutil_expiration_test PRIVATE clickhouse_common_zookeeper)

add_executable(zkutil_test_async zkutil_test_async.cpp)
target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper)

@ -1,15 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Добавляет в файрвол правила, не пропускающие пакеты до серверов ZooKeeper.
|
||||
# Используется для тестирования поведения программ при потере соединения с ZooKeeper.
|
||||
# yeszk.sh производит обратные изменения.
|
||||
|
||||
# Чтобы посмотреть, какие правила сейчас есть, используйте sudo iptables -L и sudo ip6tables -L
|
||||
|
||||
sudo iptables -A OUTPUT -p tcp --dport 2181 -j DROP
|
||||
sudo ip6tables -A OUTPUT -p tcp --dport 2181 -j DROP
|
||||
|
||||
# You could also test random drops:
|
||||
#sudo iptables -A OUTPUT -p tcp --dport 2181 -j REJECT --reject-with tcp-reset -m statistic --mode random --probability 0.1
|
||||
#sudo ip6tables -A OUTPUT -p tcp --dport 2181 -j REJECT --reject-with tcp-reset -m statistic --mode random --probability 0.1
|
||||
|
@ -1,6 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Выполняет действия, обратные nozk.sh
|
||||
|
||||
cat nozk.sh | sed 's/-A/-D/g' | bash
|
||||
|
@ -1,70 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <Poco/ConsoleChannel.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
|
||||
/// Проверяет, какие ошибки выдает ZooKeeper при попытке сделать какую-нибудь операцию через разное время после истечения сессии.
|
||||
/// Спойлер: multi иногда падает с segfault, а до этого фейлится с marshalling error.
|
||||
/// create всегда фейлится с invalid zhandle state.
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
try
|
||||
{
|
||||
if (argc != 2)
|
||||
{
|
||||
std::cerr << "usage: " << argv[0] << " hosts" << std::endl;
|
||||
return 2;
|
||||
}
|
||||
|
||||
Poco::AutoPtr<Poco::ConsoleChannel> channel = new Poco::ConsoleChannel(std::cerr);
|
||||
Poco::Logger::root().setChannel(channel);
|
||||
Poco::Logger::root().setLevel("trace");
|
||||
|
||||
zkutil::ZooKeeper zk(argv[1]);
|
||||
std::string unused;
|
||||
zk.tryCreate("/test", "", zkutil::CreateMode::Persistent, unused);
|
||||
|
||||
std::cerr << "Please run `./nozk.sh && sleep 40s && ./yeszk.sh`" << std::endl;
|
||||
|
||||
time_t time0 = time(nullptr);
|
||||
|
||||
while (true)
|
||||
{
|
||||
{
|
||||
Coordination::Requests ops;
|
||||
ops.emplace_back(zkutil::makeCreateRequest("/test/zk_expiration_test", "hello", zkutil::CreateMode::Persistent));
|
||||
ops.emplace_back(zkutil::makeRemoveRequest("/test/zk_expiration_test", -1));
|
||||
|
||||
Coordination::Responses responses;
|
||||
Coordination::Error code = zk.tryMultiNoThrow(ops, responses);
|
||||
|
||||
std::cout << time(nullptr) - time0 << "s: " << Coordination::errorMessage(code) << std::endl;
|
||||
try
|
||||
{
|
||||
if (code != Coordination::Error::ZOK)
|
||||
std::cout << "Path: " << zkutil::KeeperMultiException(code, ops, responses).getPathForFirstFailedOp() << std::endl;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
std::cout << DB::getCurrentExceptionMessage(false) << std::endl;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
sleep(1);
|
||||
}
|
||||
}
|
||||
catch (Coordination::Exception &)
|
||||
{
|
||||
std::cerr << "KeeperException: " << DB::getCurrentExceptionMessage(true) << std::endl;
|
||||
return 1;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
std::cerr << "Some exception: " << DB::getCurrentExceptionMessage(true) << std::endl;
|
||||
return 2;
|
||||
}
|
||||
}
|
@@ -415,7 +415,9 @@ class IColumn;
M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing. Will be removed after 2021-03-31", 0) \
M(Bool, enable_debug_queries, false, "Enabled debug queries, but now is obsolete", 0) \
M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0) \
M(UnionMode, union_default_mode, UnionMode::DISTINCT, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0)
M(UnionMode, union_default_mode, UnionMode::DISTINCT, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0) \
M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \

// End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below.
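
As a rough, hedged illustration of what the two GROUP BY settings above control (the table `t` with key `k` and value `v` is hypothetical; the actual rewrite is performed by the query optimizer):

``` sql
-- optimize_aggregators_of_group_by_keys = 1: max(k) over the GROUP BY key k is
-- redundant (it always equals k within a group) and can be eliminated.
SELECT max(k), sum(v) FROM t GROUP BY k;

-- optimize_group_by_function_keys = 1: a key that is a function of another key
-- adds nothing to the grouping and can be dropped from GROUP BY.
SELECT k, sum(v) FROM t GROUP BY k, toString(k);
```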
@@ -427,10 +429,10 @@ class IColumn;
M(Bool, output_format_csv_crlf_end_of_line, false, "If it is set true, end of line in CSV format will be \\r\\n instead of \\n.", 0) \
M(Bool, input_format_csv_unquoted_null_literal_as_null, false, "Consider unquoted NULL literal as \\N", 0) \
M(Bool, input_format_csv_enum_as_number, false, "Treat inserted enum values in CSV formats as enum indices \\N", 0) \
M(Bool, input_format_csv_arrays_as_nested_csv, false, R"(When reading Array from CSV, expect that its elements were serialized in nested CSV and then put into string. Example: "[""Hello"", ""world"", ""42"""" TV""]". Braces around array can be omitted.)", 0) \
M(Bool, input_format_skip_unknown_fields, false, "Skip columns with unknown names from input data (it works for JSONEachRow, CSVWithNames, TSVWithNames and TSKV formats).", 0) \
M(Bool, input_format_with_names_use_header, true, "For TSVWithNames and CSVWithNames input formats this controls whether format parser is to assume that column data appear in the input exactly as they are specified in the header.", 0) \
M(Bool, input_format_import_nested_json, false, "Map nested JSON data to nested tables (it works for JSONEachRow format).", 0) \
M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
M(Bool, input_format_defaults_for_omitted_fields, true, "For input data calculate default expressions for omitted fields (it works for JSONEachRow, CSV and TSV formats).", IMPORTANT) \
M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \
M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices \\N", 0) \
@@ -439,7 +441,6 @@ class IColumn;
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic' and 'best_effort'.", 0) \
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
\
M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
M(Bool, input_format_values_interpret_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression.", 0) \
M(Bool, input_format_values_deduce_templates_of_expressions, true, "For Values format: if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows.", 0) \
M(Bool, input_format_values_accurate_types_of_literals, true, "For Values format: when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues.", 0) \
@@ -300,7 +300,7 @@ static void serializeTextImpl(const IColumn & column, size_t row_num, WriteBuffe

template <typename Reader>
static void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && read_nested)
static void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && read_nested, bool allow_unenclosed)
{
ColumnArray & column_array = assert_cast<ColumnArray &>(column);
ColumnArray::Offsets & offsets = column_array.getOffsets();
@@ -308,7 +308,12 @@ static void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && r
IColumn & nested_column = column_array.getData();

size_t size = 0;
assertChar('[', istr);

bool has_braces = false;
if (checkChar('[', istr))
has_braces = true;
else if (!allow_unenclosed)
throw Exception(ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT, "Array does not start with '[' character");

try
{
@@ -320,7 +325,9 @@ static void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && r
if (*istr.position() == ',')
++istr.position();
else
throw Exception("Cannot read array from text", ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT);
throw Exception(ErrorCodes::CANNOT_READ_ARRAY_FROM_TEXT,
"Cannot read array from text, expected comma or end of array, found '{}'",
*istr.position());
}

first = false;
@@ -335,7 +342,11 @@ static void deserializeTextImpl(IColumn & column, ReadBuffer & istr, Reader && r

skipWhitespaceIfAny(istr);
}
assertChar(']', istr);

if (has_braces)
assertChar(']', istr);
else /// If array is not enclosed in braces, we read until EOF.
assertEOF(istr);
}
catch (...)
{
@@ -364,7 +375,7 @@ void DataTypeArray::deserializeText(IColumn & column, ReadBuffer & istr, const F
[&](IColumn & nested_column)
{
nested->deserializeAsTextQuoted(nested_column, istr, settings);
});
}, false);
}

void DataTypeArray::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
@@ -390,7 +401,11 @@ void DataTypeArray::serializeTextJSON(const IColumn & column, size_t row_num, Wr

void DataTypeArray::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
deserializeTextImpl(column, istr, [&](IColumn & nested_column) { nested->deserializeAsTextJSON(nested_column, istr, settings); });
deserializeTextImpl(column, istr,
[&](IColumn & nested_column)
{
nested->deserializeAsTextJSON(nested_column, istr, settings);
}, false);
}

@@ -429,7 +444,23 @@ void DataTypeArray::deserializeTextCSV(IColumn & column, ReadBuffer & istr, cons
String s;
readCSV(s, istr, settings.csv);
ReadBufferFromString rb(s);
deserializeText(column, rb, settings);

if (settings.csv.input_format_arrays_as_nested_csv)
{
deserializeTextImpl(column, rb,
[&](IColumn & nested_column)
{
nested->deserializeAsTextCSV(nested_column, rb, settings);
}, true);
}
else
{
deserializeTextImpl(column, rb,
[&](IColumn & nested_column)
{
nested->deserializeAsTextQuoted(nested_column, rb, settings);
}, true);
}
}

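A hedged sketch of what the new `input_format_csv_arrays_as_nested_csv` setting enables, mirroring the example string from the setting's own description (the table name `csv_arrays_test` is hypothetical, not part of the patch):

``` sql
SET input_format_csv_arrays_as_nested_csv = 1;

CREATE TABLE csv_arrays_test (a Array(String)) ENGINE = Memory;

-- The whole array is one CSV field; its elements are themselves CSV-quoted,
-- so inner double quotes are doubled twice.
INSERT INTO csv_arrays_test FORMAT CSV
"[""Hello"", ""world"", ""42"""" TV""]"

SELECT * FROM csv_arrays_test;
```
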
@@ -64,6 +64,7 @@ FormatSettings getFormatSettings(const Context & context,
format_settings.csv.empty_as_default = settings.input_format_defaults_for_omitted_fields;
format_settings.csv.input_format_enum_as_number = settings.input_format_csv_enum_as_number;
format_settings.csv.unquoted_null_literal_as_null = settings.input_format_csv_unquoted_null_literal_as_null;
format_settings.csv.input_format_arrays_as_nested_csv = settings.input_format_csv_arrays_as_nested_csv;
format_settings.custom.escaping_rule = settings.format_custom_escaping_rule;
format_settings.custom.field_delimiter = settings.format_custom_field_delimiter;
format_settings.custom.result_after_delimiter = settings.format_custom_result_after_delimiter;

@@ -71,6 +71,7 @@ struct FormatSettings
bool empty_as_default = false;
bool crlf_end_of_line = false;
bool input_format_enum_as_number = false;
bool input_format_arrays_as_nested_csv = false;
} csv;

struct Custom
@@ -1216,7 +1216,10 @@ public:
{
return res;
}
else if (isColumnedAsDecimal(left_type) || isColumnedAsDecimal(right_type))
else if ((isColumnedAsDecimal(left_type) || isColumnedAsDecimal(right_type))
// Comparing Date and DateTime64 requires implicit conversion,
// otherwise Date is treated as number.
&& !(date_and_datetime && (isDate(left_type) || isDate(right_type))))
{
// compare
if (!allowDecimalComparison(left_type, right_type) && !date_and_datetime)
@@ -212,18 +212,18 @@ void AsynchronousMetrics::update()
{
Int64 amount = total_memory_tracker.get();
Int64 peak = total_memory_tracker.getPeak();
Int64 new_peak = data.resident;
Int64 new_amount = data.resident;

LOG_DEBUG(&Poco::Logger::get("AsynchronousMetrics"),
"MemoryTracking: was {}, peak {}, will set to {} (RSS), difference: {}",
ReadableSize(amount),
ReadableSize(peak),
ReadableSize(new_peak),
ReadableSize(new_peak - peak)
ReadableSize(new_amount),
ReadableSize(new_amount - amount)
);

total_memory_tracker.set(new_peak);
CurrentMetrics::set(CurrentMetrics::MemoryTracking, new_peak);
total_memory_tracker.set(new_amount);
CurrentMetrics::set(CurrentMetrics::MemoryTracking, new_amount);
}
}
#endif
@ -247,6 +247,10 @@ void AsynchronousMetrics::update()
size_t number_of_databases = databases.size();
size_t total_number_of_tables = 0;

size_t total_number_of_bytes = 0;
size_t total_number_of_rows = 0;
size_t total_number_of_parts = 0;

for (const auto & db : databases)
{
/// Check if database can contain MergeTree tables

@ -295,6 +299,17 @@ void AsynchronousMetrics::update()
if (table_merge_tree)
{
calculateMax(max_part_count_for_partition, table_merge_tree->getMaxPartsCountForPartition());
const auto & settings = global_context.getSettingsRef();
total_number_of_bytes += table_merge_tree->totalBytes(settings).value();
total_number_of_rows += table_merge_tree->totalRows(settings).value();
total_number_of_parts += table_merge_tree->getPartsCount();
}
if (table_replicated_merge_tree)
{
const auto & settings = global_context.getSettingsRef();
total_number_of_bytes += table_replicated_merge_tree->totalBytes(settings).value();
total_number_of_rows += table_replicated_merge_tree->totalRows(settings).value();
total_number_of_parts += table_replicated_merge_tree->getPartsCount();
}
}
}

@ -315,6 +330,10 @@ void AsynchronousMetrics::update()
new_values["NumberOfDatabases"] = number_of_databases;
new_values["NumberOfTables"] = total_number_of_tables;

new_values["TotalBytesOfMergeTreeTables"] = total_number_of_bytes;
new_values["TotalRowsOfMergeTreeTables"] = total_number_of_rows;
new_values["TotalPartsOfMergeTreeTables"] = total_number_of_parts;

auto get_metric_name = [](const String & name) -> const char *
{
static std::map<String, const char *> metric_map = {

@ -336,16 +355,22 @@ void AsynchronousMetrics::update()
return it->second;
};

for (const auto & server : servers_to_start_before_tables)
if (servers_to_start_before_tables)
{
if (const auto * name = get_metric_name(server.getPortName()))
new_values[name] = server.currentThreads();
for (const auto & server : *servers_to_start_before_tables)
{
if (const auto * name = get_metric_name(server.getPortName()))
new_values[name] = server.currentThreads();
}
}

for (const auto & server : servers)
if (servers)
{
if (const auto * name = get_metric_name(server.getPortName()))
new_values[name] = server.currentThreads();
for (const auto & server : *servers)
{
if (const auto * name = get_metric_name(server.getPortName()))
new_values[name] = server.currentThreads();
}
}
}

@ -26,14 +26,26 @@ using AsynchronousMetricValues = std::unordered_map<std::string, AsynchronousMet
class AsynchronousMetrics
{
public:
// The default value of update_period_seconds is for ClickHouse-over-YT
// in Arcadia -- it uses its own server implementation that also uses these
// metrics.
#if defined(ARCADIA_BUILD)
/// This constructor needs only to provide backward compatibility with some other projects (hello, Arcadia).
/// Never use this in the ClickHouse codebase.
AsynchronousMetrics(
Context & global_context_,
int update_period_seconds = 60)
: global_context(global_context_)
, update_period(update_period_seconds)
{
}
#endif

/// The default value of update_period_seconds is for ClickHouse-over-YT
/// in Arcadia -- it uses its own server implementation that also uses these
/// metrics.
AsynchronousMetrics(
Context & global_context_,
int update_period_seconds,
const std::vector<ProtocolServerAdapter> & servers_to_start_before_tables_,
const std::vector<ProtocolServerAdapter> & servers_)
std::shared_ptr<std::vector<ProtocolServerAdapter>> servers_to_start_before_tables_,
std::shared_ptr<std::vector<ProtocolServerAdapter>> servers_)
: global_context(global_context_)
, update_period(update_period_seconds)
, servers_to_start_before_tables(servers_to_start_before_tables_)

@ -55,8 +67,8 @@ public:
private:
Context & global_context;
const std::chrono::seconds update_period;
const std::vector<ProtocolServerAdapter> & servers_to_start_before_tables;
const std::vector<ProtocolServerAdapter> & servers;
std::shared_ptr<std::vector<ProtocolServerAdapter>> servers_to_start_before_tables{nullptr};
std::shared_ptr<std::vector<ProtocolServerAdapter>> servers{nullptr};

mutable std::mutex mutex;
std::condition_variable wait_cond;

@ -202,10 +202,10 @@ static inline std::tuple<NamesAndTypesList, NamesAndTypesList, NamesAndTypesList
keys->arguments->children.insert(keys->arguments->children.end(),
index_columns->children.begin(), index_columns->children.end());
else if (startsWith(declare_index->index_type, "UNIQUE_"))
unique_keys->arguments->children.insert(keys->arguments->children.end(),
unique_keys->arguments->children.insert(unique_keys->arguments->children.end(),
index_columns->children.begin(), index_columns->children.end());
if (startsWith(declare_index->index_type, "PRIMARY_KEY_"))
primary_keys->arguments->children.insert(keys->arguments->children.end(),
primary_keys->arguments->children.insert(primary_keys->arguments->children.end(),
index_columns->children.begin(), index_columns->children.end());
}
}

@ -195,3 +195,15 @@ TEST(MySQLCreateRewritten, RewrittenQueryWithPrefixKey)
|
||||
"ReplacingMergeTree(_version) PARTITION BY intDiv(key, 4294967) ORDER BY (key, prefix_key)");
|
||||
}
|
||||
|
||||
TEST(MySQLCreateRewritten, UniqueKeysConvert)
|
||||
{
|
||||
tryRegisterFunctions();
|
||||
const auto & context_holder = getContext();
|
||||
|
||||
EXPECT_EQ(queryToString(tryRewrittenCreateQuery(
|
||||
"CREATE TABLE `test_database`.`test_table_1` (code varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,name varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,"
|
||||
" id bigint NOT NULL AUTO_INCREMENT, tenant_id bigint NOT NULL, PRIMARY KEY (id), UNIQUE KEY code_id (code, tenant_id), UNIQUE KEY name_id (name, tenant_id))"
|
||||
" ENGINE=InnoDB AUTO_INCREMENT=100 DEFAULT CHARSET=utf8 COLLATE=utf8_bin;", context_holder.context)),
|
||||
"CREATE TABLE test_database.test_table_1 (`code` String, `name` String, `id` Int64, `tenant_id` Int64, `_sign` Int8() MATERIALIZED 1, `_version` UInt64() MATERIALIZED 1)"
|
||||
" ENGINE = ReplacingMergeTree(_version) PARTITION BY intDiv(id, 18446744073709551) ORDER BY (code, name, tenant_id, id)");
|
||||
}
|
||||
|
@ -286,6 +286,17 @@ void removeUnneededColumnsFromSelectClause(const ASTSelectQuery * select_query,
|
||||
{
|
||||
new_elements.push_back(elem);
|
||||
}
|
||||
else
|
||||
{
|
||||
ASTFunction * func = elem->as<ASTFunction>();
|
||||
if (func && func->name == "untuple")
|
||||
for (const auto & col : required_result_columns)
|
||||
if (col.rfind("_ut_", 0) == 0)
|
||||
{
|
||||
new_elements.push_back(elem);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
elements = std::move(new_elements);
|
||||
@ -658,14 +669,24 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select
|
||||
for (const auto & name : columns_context.requiredColumns())
|
||||
ss << " '" << name << "'";
|
||||
|
||||
if (!source_column_names.empty())
|
||||
if (storage)
|
||||
{
|
||||
ss << ", source columns:";
|
||||
for (const auto & name : source_column_names)
|
||||
ss << " '" << name << "'";
|
||||
ss << ", maybe you meant: ";
|
||||
for (const auto & name : columns_context.requiredColumns())
|
||||
{
|
||||
auto hints = storage->getHints(name);
|
||||
if (!hints.empty())
|
||||
ss << " '" << toString(hints) << "'";
|
||||
}
|
||||
}
|
||||
else
|
||||
ss << ", no source columns";
|
||||
{
|
||||
if (!source_column_names.empty())
|
||||
for (const auto & name : columns_context.requiredColumns())
|
||||
ss << " '" << name << "'";
|
||||
else
|
||||
ss << ", no source columns";
|
||||
}
|
||||
|
||||
if (columns_context.has_table_join)
|
||||
{
|
||||
|
@ -53,18 +53,20 @@ Chunk IRowInputFormat::generate()
|
||||
///auto chunk_missing_values = std::make_unique<ChunkMissingValues>();
|
||||
block_missing_values.clear();
|
||||
|
||||
size_t num_rows = 0;
|
||||
|
||||
try
|
||||
{
|
||||
RowReadExtension info;
|
||||
for (size_t rows = 0; rows < params.max_block_size; ++rows)
|
||||
bool continue_reading = true;
|
||||
for (size_t rows = 0; rows < params.max_block_size && continue_reading; ++rows)
|
||||
{
|
||||
try
|
||||
{
|
||||
++total_rows;
|
||||
|
||||
info.read_columns.clear();
|
||||
if (!readRow(columns, info))
|
||||
break;
|
||||
continue_reading = readRow(columns, info);
|
||||
|
||||
for (size_t column_idx = 0; column_idx < info.read_columns.size(); ++column_idx)
|
||||
{
|
||||
@ -76,6 +78,18 @@ Chunk IRowInputFormat::generate()
|
||||
block_missing_values.setBit(column_idx, column_size - 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Some formats may read row AND say the read is finished.
|
||||
/// For such a case, get the number of rows from the first column.
|
||||
if (!columns.empty())
|
||||
num_rows = columns.front()->size();
|
||||
|
||||
if (!continue_reading)
|
||||
break;
|
||||
|
||||
/// The case when there is no columns. Just count rows.
|
||||
if (columns.empty())
|
||||
++num_rows;
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
@ -107,17 +121,13 @@ Chunk IRowInputFormat::generate()
|
||||
|
||||
syncAfterError();
|
||||
|
||||
/// Truncate all columns in block to minimal size (remove values, that was appended to only part of columns).
|
||||
|
||||
size_t min_size = std::numeric_limits<size_t>::max();
|
||||
for (size_t column_idx = 0; column_idx < num_columns; ++column_idx)
|
||||
min_size = std::min(min_size, columns[column_idx]->size());
|
||||
/// Truncate all columns in block to the initial size (remove values that were appended to only part of the columns).
|
||||
|
||||
for (size_t column_idx = 0; column_idx < num_columns; ++column_idx)
|
||||
{
|
||||
auto & column = columns[column_idx];
|
||||
if (column->size() > min_size)
|
||||
column->popBack(column->size() - min_size);
|
||||
if (column->size() > num_rows)
|
||||
column->popBack(column->size() - num_rows);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -157,7 +167,6 @@ Chunk IRowInputFormat::generate()
|
||||
return {};
|
||||
}
|
||||
|
||||
auto num_rows = columns.front()->size();
|
||||
Chunk chunk(std::move(columns), num_rows);
|
||||
//chunk.setChunkInfo(std::move(chunk_missing_values));
|
||||
return chunk;
|
||||
|
@ -62,9 +62,9 @@ void ArrowBlockOutputFormat::prepareWriter(const std::shared_ptr<arrow::Schema>
|
||||
|
||||
// TODO: should we use arrow::ipc::IpcOptions::alignment?
|
||||
if (stream)
|
||||
writer_status = arrow::ipc::NewStreamWriter(arrow_ostream.get(), schema);
|
||||
writer_status = arrow::ipc::MakeStreamWriter(arrow_ostream.get(), schema);
|
||||
else
|
||||
writer_status = arrow::ipc::NewFileWriter(arrow_ostream.get(), schema);
|
||||
writer_status = arrow::ipc::MakeFileWriter(arrow_ostream.get(), schema);
|
||||
|
||||
if (!writer_status.ok())
|
||||
throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int TABLE_IS_DROPPED;
|
||||
@ -34,17 +33,18 @@ bool IStorage::isVirtualColumn(const String & column_name, const StorageMetadata
|
||||
}
|
||||
|
||||
RWLockImpl::LockHolder IStorage::tryLockTimed(
|
||||
const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const std::chrono::milliseconds & acquire_timeout) const
|
||||
const RWLock & rwlock, RWLockImpl::Type type, const String & query_id, const std::chrono::milliseconds & acquire_timeout) const
|
||||
{
|
||||
auto lock_holder = rwlock->getLock(type, query_id, acquire_timeout);
|
||||
if (!lock_holder)
|
||||
{
|
||||
const String type_str = type == RWLockImpl::Type::Read ? "READ" : "WRITE";
|
||||
throw Exception(
|
||||
type_str + " locking attempt on \"" + getStorageID().getFullTableName() +
|
||||
"\" has timed out! (" + std::to_string(acquire_timeout.count()) + "ms) "
|
||||
"Possible deadlock avoided. Client should retry.",
|
||||
ErrorCodes::DEADLOCK_AVOIDED);
|
||||
type_str + " locking attempt on \"" + getStorageID().getFullTableName() + "\" has timed out! ("
|
||||
+ std::to_string(acquire_timeout.count())
|
||||
+ "ms) "
|
||||
"Possible deadlock avoided. Client should retry.",
|
||||
ErrorCodes::DEADLOCK_AVOIDED);
|
||||
}
|
||||
return lock_holder;
|
||||
}
|
||||
@ -84,26 +84,26 @@ TableExclusiveLockHolder IStorage::lockExclusively(const String & query_id, cons
|
||||
}
|
||||
|
||||
Pipe IStorage::read(
|
||||
const Names & /*column_names*/,
|
||||
const StorageMetadataPtr & /*metadata_snapshot*/,
|
||||
SelectQueryInfo & /*query_info*/,
|
||||
const Context & /*context*/,
|
||||
QueryProcessingStage::Enum /*processed_stage*/,
|
||||
size_t /*max_block_size*/,
|
||||
unsigned /*num_streams*/)
|
||||
const Names & /*column_names*/,
|
||||
const StorageMetadataPtr & /*metadata_snapshot*/,
|
||||
SelectQueryInfo & /*query_info*/,
|
||||
const Context & /*context*/,
|
||||
QueryProcessingStage::Enum /*processed_stage*/,
|
||||
size_t /*max_block_size*/,
|
||||
unsigned /*num_streams*/)
|
||||
{
|
||||
throw Exception("Method read is not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void IStorage::read(
|
||||
QueryPlan & query_plan,
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
SelectQueryInfo & query_info,
|
||||
const Context & context,
|
||||
QueryProcessingStage::Enum processed_stage,
|
||||
size_t max_block_size,
|
||||
unsigned num_streams)
|
||||
QueryPlan & query_plan,
|
||||
const Names & column_names,
|
||||
const StorageMetadataPtr & metadata_snapshot,
|
||||
SelectQueryInfo & query_info,
|
||||
const Context & context,
|
||||
QueryProcessingStage::Enum processed_stage,
|
||||
size_t max_block_size,
|
||||
unsigned num_streams)
|
||||
{
|
||||
auto pipe = read(column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
|
||||
if (pipe.empty())
|
||||
@ -119,15 +119,12 @@ void IStorage::read(
|
||||
}
|
||||
|
||||
Pipe IStorage::alterPartition(
|
||||
const StorageMetadataPtr & /* metadata_snapshot */,
|
||||
const PartitionCommands & /* commands */,
|
||||
const Context & /* context */)
|
||||
const StorageMetadataPtr & /* metadata_snapshot */, const PartitionCommands & /* commands */, const Context & /* context */)
|
||||
{
|
||||
throw Exception("Partition operations are not supported by storage " + getName(), ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void IStorage::alter(
|
||||
const AlterCommands & params, const Context & context, TableLockHolder &)
|
||||
void IStorage::alter(const AlterCommands & params, const Context & context, TableLockHolder &)
|
||||
{
|
||||
auto table_id = getStorageID();
|
||||
StorageInMemoryMetadata new_metadata = getInMemoryMetadata();
|
||||
@ -148,7 +145,8 @@ void IStorage::checkAlterIsPossible(const AlterCommands & commands, const Settin
|
||||
}
|
||||
}
|
||||
|
||||
void IStorage::checkAlterPartitionIsPossible(const PartitionCommands & /*commands*/, const StorageMetadataPtr & /*metadata_snapshot*/, const Settings & /*settings*/) const
|
||||
void IStorage::checkAlterPartitionIsPossible(
|
||||
const PartitionCommands & /*commands*/, const StorageMetadataPtr & /*metadata_snapshot*/, const Settings & /*settings*/) const
|
||||
{
|
||||
throw Exception("Table engine " + getName() + " doesn't support partitioning", ErrorCodes::NOT_IMPLEMENTED);
|
||||
}
|
||||
@ -170,6 +168,15 @@ NamesAndTypesList IStorage::getVirtuals() const
|
||||
return {};
|
||||
}
|
||||
|
||||
Names IStorage::getAllRegisteredNames() const
|
||||
{
|
||||
Names result;
|
||||
auto getter = [](const auto & column) { return column.name; };
|
||||
const NamesAndTypesList & available_columns = getInMemoryMetadata().getColumns().getAllPhysical();
|
||||
std::transform(available_columns.begin(), available_columns.end(), std::back_inserter(result), getter);
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string PrewhereDAGInfo::dump() const
|
||||
{
|
||||
WriteBufferFromOwnString ss;
|
||||
@ -207,6 +214,5 @@ std::string FilterInfo::dump() const
|
||||
}
|
||||
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ struct ColumnSize
|
||||
* - data storage structure (compression, etc.)
|
||||
* - concurrent access to data (locks, etc.)
|
||||
*/
|
||||
class IStorage : public std::enable_shared_from_this<IStorage>, public TypePromotion<IStorage>
|
||||
class IStorage : public std::enable_shared_from_this<IStorage>, public TypePromotion<IStorage>, public IHints<1, IStorage>
|
||||
{
|
||||
public:
|
||||
IStorage() = delete;
|
||||
@ -87,7 +87,6 @@ public:
|
||||
: storage_id(std::move(storage_id_))
|
||||
, metadata(std::make_unique<StorageInMemoryMetadata>()) {} //-V730
|
||||
|
||||
virtual ~IStorage() = default;
|
||||
IStorage(const IStorage &) = delete;
|
||||
IStorage & operator=(const IStorage &) = delete;
|
||||
|
||||
@ -169,6 +168,7 @@ public:
|
||||
/// By default return empty list of columns.
|
||||
virtual NamesAndTypesList getVirtuals() const;
|
||||
|
||||
Names getAllRegisteredNames() const override;
|
||||
protected:
|
||||
|
||||
/// Returns whether the column is virtual - by default all columns are real.
|
||||
|
@ -877,6 +877,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
std::lock_guard loading_lock(mutex);
|
||||
if (!data_parts_indexes.insert(part).second)
|
||||
throw Exception("Part " + part->name + " already exists", ErrorCodes::DUPLICATE_DATA_PART);
|
||||
|
||||
addPartContributionToDataVolume(part);
|
||||
});
|
||||
}
|
||||
|
||||
@ -893,6 +895,8 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
|
||||
if (!data_parts_indexes.insert(part).second)
|
||||
throw Exception("Part " + part->name + " already exists", ErrorCodes::DUPLICATE_DATA_PART);
|
||||
|
||||
addPartContributionToDataVolume(part);
|
||||
}
|
||||
|
||||
if (has_non_adaptive_parts && has_adaptive_parts && !settings->enable_mixed_granularity_parts)
|
||||
@ -924,6 +928,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
{
|
||||
(*it)->remove_time.store((*it)->modification_time, std::memory_order_relaxed);
|
||||
modifyPartState(it, DataPartState::Outdated);
|
||||
removePartContributionToDataVolume(*it);
|
||||
};
|
||||
|
||||
(*prev_jt)->assertState({DataPartState::Committed});
|
||||
@ -1292,6 +1297,8 @@ void MergeTreeData::dropAllData()
|
||||
}
|
||||
}
|
||||
|
||||
setDataVolume(0, 0, 0);
|
||||
|
||||
LOG_TRACE(log, "dropAllData: done.");
|
||||
}
|
||||
|
||||
@ -1987,16 +1994,25 @@ bool MergeTreeData::renameTempPartAndReplace(
|
||||
}
|
||||
else
|
||||
{
|
||||
size_t reduce_bytes = 0;
|
||||
size_t reduce_rows = 0;
|
||||
size_t reduce_parts = 0;
|
||||
auto current_time = time(nullptr);
|
||||
for (const DataPartPtr & covered_part : covered_parts)
|
||||
{
|
||||
covered_part->remove_time.store(current_time, std::memory_order_relaxed);
|
||||
modifyPartState(covered_part, DataPartState::Outdated);
|
||||
removePartContributionToColumnSizes(covered_part);
|
||||
reduce_bytes += covered_part->getBytesOnDisk();
|
||||
reduce_rows += covered_part->rows_count;
|
||||
++reduce_parts;
|
||||
}
|
||||
|
||||
decreaseDataVolume(reduce_bytes, reduce_rows, reduce_parts);
|
||||
|
||||
modifyPartState(part_it, DataPartState::Committed);
|
||||
addPartContributionToColumnSizes(part);
|
||||
addPartContributionToDataVolume(part);
|
||||
}
|
||||
|
||||
auto part_in_memory = asInMemoryPart(part);
|
||||
@ -2037,7 +2053,10 @@ void MergeTreeData::removePartsFromWorkingSet(const MergeTreeData::DataPartsVect
|
||||
for (const DataPartPtr & part : remove)
|
||||
{
|
||||
if (part->state == IMergeTreeDataPart::State::Committed)
|
||||
{
|
||||
removePartContributionToColumnSizes(part);
|
||||
removePartContributionToDataVolume(part);
|
||||
}
|
||||
|
||||
if (part->state == IMergeTreeDataPart::State::Committed || clear_without_timeout)
|
||||
part->remove_time.store(remove_time, std::memory_order_relaxed);
|
||||
@ -2150,7 +2169,10 @@ restore_covered)
|
||||
DataPartPtr part = *it_part;
|
||||
|
||||
if (part->state == DataPartState::Committed)
|
||||
{
|
||||
removePartContributionToDataVolume(part);
|
||||
removePartContributionToColumnSizes(part);
|
||||
}
|
||||
modifyPartState(it_part, DataPartState::Deleting);
|
||||
|
||||
part->renameToDetached(prefix);
|
||||
@ -2198,6 +2220,7 @@ restore_covered)
|
||||
if ((*it)->state != DataPartState::Committed)
|
||||
{
|
||||
addPartContributionToColumnSizes(*it);
|
||||
addPartContributionToDataVolume(*it);
|
||||
modifyPartState(it, DataPartState::Committed); // iterator is not invalidated here
|
||||
}
|
||||
|
||||
@ -2228,6 +2251,7 @@ restore_covered)
|
||||
if ((*it)->state != DataPartState::Committed)
|
||||
{
|
||||
addPartContributionToColumnSizes(*it);
|
||||
addPartContributionToDataVolume(*it);
|
||||
modifyPartState(it, DataPartState::Committed);
|
||||
}
|
||||
|
||||
@ -2289,41 +2313,19 @@ void MergeTreeData::tryRemovePartImmediately(DataPartPtr && part)
|
||||
|
||||
size_t MergeTreeData::getTotalActiveSizeInBytes() const
|
||||
{
|
||||
size_t res = 0;
|
||||
{
|
||||
auto lock = lockParts();
|
||||
|
||||
for (const auto & part : getDataPartsStateRange(DataPartState::Committed))
|
||||
res += part->getBytesOnDisk();
|
||||
}
|
||||
|
||||
return res;
|
||||
return total_active_size_bytes.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
|
||||
size_t MergeTreeData::getTotalActiveSizeInRows() const
|
||||
{
|
||||
size_t res = 0;
|
||||
{
|
||||
auto lock = lockParts();
|
||||
|
||||
for (const auto & part : getDataPartsStateRange(DataPartState::Committed))
|
||||
res += part->rows_count;
|
||||
}
|
||||
|
||||
return res;
|
||||
return total_active_size_rows.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
|
||||
size_t MergeTreeData::getPartsCount() const
|
||||
{
|
||||
auto lock = lockParts();
|
||||
|
||||
size_t res = 0;
|
||||
for (const auto & part [[maybe_unused]] : getDataPartsStateRange(DataPartState::Committed))
|
||||
++res;
|
||||
|
||||
return res;
|
||||
return total_active_size_parts.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
|
||||
@ -2452,6 +2454,9 @@ void MergeTreeData::swapActivePart(MergeTreeData::DataPartPtr part_copy)
|
||||
auto part_it = data_parts_indexes.insert(part_copy).first;
|
||||
modifyPartState(part_it, DataPartState::Committed);
|
||||
|
||||
removePartContributionToDataVolume(original_active_part);
|
||||
addPartContributionToDataVolume(part_copy);
|
||||
|
||||
auto disk = original_active_part->volume->getDisk();
|
||||
String marker_path = original_active_part->getFullRelativePath() + IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME;
|
||||
try
|
||||
@ -3349,6 +3354,15 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
|
||||
auto * owing_parts_lock = acquired_parts_lock ? acquired_parts_lock : &parts_lock;
|
||||
|
||||
auto current_time = time(nullptr);
|
||||
|
||||
size_t add_bytes = 0;
|
||||
size_t add_rows = 0;
|
||||
size_t add_parts = 0;
|
||||
|
||||
size_t reduce_bytes = 0;
|
||||
size_t reduce_rows = 0;
|
||||
size_t reduce_parts = 0;
|
||||
|
||||
for (const DataPartPtr & part : precommitted_parts)
|
||||
{
|
||||
DataPartPtr covering_part;
|
||||
@ -3366,14 +3380,25 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData:
|
||||
for (const DataPartPtr & covered_part : covered_parts)
|
||||
{
|
||||
covered_part->remove_time.store(current_time, std::memory_order_relaxed);
|
||||
|
||||
reduce_bytes += covered_part->getBytesOnDisk();
|
||||
reduce_rows += covered_part->rows_count;
|
||||
|
||||
data.modifyPartState(covered_part, DataPartState::Outdated);
|
||||
data.removePartContributionToColumnSizes(covered_part);
|
||||
}
|
||||
reduce_parts += covered_parts.size();
|
||||
|
||||
add_bytes += part->getBytesOnDisk();
|
||||
add_rows += part->rows_count;
|
||||
++add_parts;
|
||||
|
||||
data.modifyPartState(part, DataPartState::Committed);
|
||||
data.addPartContributionToColumnSizes(part);
|
||||
}
|
||||
}
|
||||
data.decreaseDataVolume(reduce_bytes, reduce_rows, reduce_parts);
|
||||
data.increaseDataVolume(add_bytes, add_rows, add_parts);
|
||||
}
|
||||
|
||||
clear();
|
||||
@ -3918,4 +3943,34 @@ size_t MergeTreeData::getTotalMergesWithTTLInMergeList() const
|
||||
return global_context.getMergeList().getExecutingMergesWithTTLCount();
|
||||
}
|
||||
|
||||
void MergeTreeData::addPartContributionToDataVolume(const DataPartPtr & part)
|
||||
{
|
||||
increaseDataVolume(part->getBytesOnDisk(), part->rows_count, 1);
|
||||
}
|
||||
|
||||
void MergeTreeData::removePartContributionToDataVolume(const DataPartPtr & part)
|
||||
{
|
||||
decreaseDataVolume(part->getBytesOnDisk(), part->rows_count, 1);
|
||||
}
|
||||
|
||||
void MergeTreeData::increaseDataVolume(size_t bytes, size_t rows, size_t parts)
|
||||
{
|
||||
total_active_size_bytes.fetch_add(bytes, std::memory_order_acq_rel);
|
||||
total_active_size_rows.fetch_add(rows, std::memory_order_acq_rel);
|
||||
total_active_size_parts.fetch_add(parts, std::memory_order_acq_rel);
|
||||
}
|
||||
|
||||
void MergeTreeData::decreaseDataVolume(size_t bytes, size_t rows, size_t parts)
|
||||
{
|
||||
total_active_size_bytes.fetch_sub(bytes, std::memory_order_acq_rel);
|
||||
total_active_size_rows.fetch_sub(rows, std::memory_order_acq_rel);
|
||||
total_active_size_parts.fetch_sub(parts, std::memory_order_acq_rel);
|
||||
}
|
||||
|
||||
void MergeTreeData::setDataVolume(size_t bytes, size_t rows, size_t parts)
|
||||
{
|
||||
total_active_size_bytes.store(bytes, std::memory_order_release);
|
||||
total_active_size_rows.store(rows, std::memory_order_release);
|
||||
total_active_size_parts.store(parts, std::memory_order_release);
|
||||
}
|
||||
}
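
A minimal standalone sketch of the counter pattern introduced above: aggregate byte/row/part totals are kept in std::atomic members and adjusted whenever a part is added or removed, so the getters need no lock on the parts set. The Part and Totals types below are illustrative stand-ins, not ClickHouse classes.

#include <atomic>
#include <cstddef>
#include <cstdio>

struct Part { size_t bytes; size_t rows; };   /// illustrative stand-in for a data part

class Totals
{
public:
    void add(const Part & p)
    {
        bytes.fetch_add(p.bytes, std::memory_order_acq_rel);
        rows.fetch_add(p.rows, std::memory_order_acq_rel);
        parts.fetch_add(1, std::memory_order_acq_rel);
    }
    void remove(const Part & p)
    {
        bytes.fetch_sub(p.bytes, std::memory_order_acq_rel);
        rows.fetch_sub(p.rows, std::memory_order_acq_rel);
        parts.fetch_sub(1, std::memory_order_acq_rel);
    }
    size_t totalBytes() const { return bytes.load(std::memory_order_acquire); }
    size_t totalRows() const { return rows.load(std::memory_order_acquire); }
    size_t totalParts() const { return parts.load(std::memory_order_acquire); }
private:
    std::atomic<size_t> bytes{0};
    std::atomic<size_t> rows{0};
    std::atomic<size_t> parts{0};
};

int main()
{
    Totals totals;
    totals.add({1024, 10});
    totals.add({2048, 20});
    totals.remove({1024, 10});
    std::printf("%zu bytes, %zu rows, %zu parts\n", totals.totalBytes(), totals.totalRows(), totals.totalParts());
}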
|
||||
|
@ -945,6 +945,18 @@ private:
|
||||
virtual void startBackgroundMovesIfNeeded() = 0;
|
||||
|
||||
bool allow_nullable_key{};
|
||||
|
||||
void addPartContributionToDataVolume(const DataPartPtr & part);
|
||||
void removePartContributionToDataVolume(const DataPartPtr & part);
|
||||
|
||||
void increaseDataVolume(size_t bytes, size_t rows, size_t parts);
|
||||
void decreaseDataVolume(size_t bytes, size_t rows, size_t parts);
|
||||
|
||||
void setDataVolume(size_t bytes, size_t rows, size_t parts);
|
||||
|
||||
std::atomic<size_t> total_active_size_bytes = 0;
|
||||
std::atomic<size_t> total_active_size_rows = 0;
|
||||
std::atomic<size_t> total_active_size_parts = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -111,9 +111,6 @@ struct Settings;
|
||||
M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm", 0) \
|
||||
M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \
|
||||
\
|
||||
/** Settings for testing purposes */ \
|
||||
M(Bool, randomize_part_type, false, "For testing purposes only. Randomizes part type between wide and compact", 0) \
|
||||
\
|
||||
/** Obsolete settings. Kept for backward compatibility only. */ \
|
||||
M(UInt64, min_relative_delay_to_yield_leadership, 120, "Obsolete setting, does nothing.", 0) \
|
||||
M(UInt64, check_delay_period, 60, "Obsolete setting, does nothing.", 0) \
|
||||
|
@ -234,25 +234,6 @@ If you use the Replicated version of engines, see https://clickhouse.tech/docs/e
|
||||
}
|
||||
|
||||
|
||||
static void randomizePartTypeSettings(const std::unique_ptr<MergeTreeSettings> & storage_settings)
|
||||
{
|
||||
static constexpr auto MAX_THRESHOLD_FOR_ROWS = 100000;
|
||||
static constexpr auto MAX_THRESHOLD_FOR_BYTES = 1024 * 1024 * 10;
|
||||
|
||||
/// Create all parts in wide format with probability 1/3.
|
||||
if (thread_local_rng() % 3 == 0)
|
||||
{
|
||||
storage_settings->min_rows_for_wide_part = 0;
|
||||
storage_settings->min_bytes_for_wide_part = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
storage_settings->min_rows_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_ROWS}(thread_local_rng);
|
||||
storage_settings->min_bytes_for_wide_part = std::uniform_int_distribution{0, MAX_THRESHOLD_FOR_BYTES}(thread_local_rng);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static StoragePtr create(const StorageFactory::Arguments & args)
|
||||
{
|
||||
/** [Replicated][|Summing|Collapsing|Aggregating|Replacing|Graphite]MergeTree (2 * 7 combinations) engines
|
||||
@ -737,20 +718,6 @@ static StoragePtr create(const StorageFactory::Arguments & args)
|
||||
++arg_num;
|
||||
}
|
||||
|
||||
/// Allow to randomize part type for tests to cover more cases.
|
||||
/// But if settings were set explicitly restrict it.
|
||||
if (storage_settings->randomize_part_type
|
||||
&& !storage_settings->min_rows_for_wide_part.changed
|
||||
&& !storage_settings->min_bytes_for_wide_part.changed)
|
||||
{
|
||||
randomizePartTypeSettings(storage_settings);
|
||||
LOG_INFO(&Poco::Logger::get(args.table_id.getNameForLogs() + " (registerStorageMergeTree)"),
|
||||
"Applied setting 'randomize_part_type'. "
|
||||
"Setting 'min_rows_for_wide_part' changed to {}. "
|
||||
"Setting 'min_bytes_for_wide_part' changed to {}.",
|
||||
storage_settings->min_rows_for_wide_part, storage_settings->min_bytes_for_wide_part);
|
||||
}
|
||||
|
||||
if (arg_num != arg_cnt)
|
||||
throw Exception("Wrong number of engine arguments.", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
|
@ -386,16 +386,17 @@ static void appendBlock(const Block & from, Block & to)
|
||||
|
||||
MemoryTracker::BlockerInThread temporarily_disable_memory_tracker;
|
||||
|
||||
MutableColumnPtr last_col;
|
||||
try
|
||||
{
|
||||
for (size_t column_no = 0, columns = to.columns(); column_no < columns; ++column_no)
|
||||
{
|
||||
const IColumn & col_from = *from.getByPosition(column_no).column.get();
|
||||
MutableColumnPtr col_to = IColumn::mutate(std::move(to.getByPosition(column_no).column));
|
||||
last_col = IColumn::mutate(std::move(to.getByPosition(column_no).column));
|
||||
|
||||
col_to->insertRangeFrom(col_from, 0, rows);
|
||||
last_col->insertRangeFrom(col_from, 0, rows);
|
||||
|
||||
to.getByPosition(column_no).column = std::move(col_to);
|
||||
to.getByPosition(column_no).column = std::move(last_col);
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
@ -406,6 +407,16 @@ static void appendBlock(const Block & from, Block & to)
|
||||
for (size_t column_no = 0, columns = to.columns(); column_no < columns; ++column_no)
|
||||
{
|
||||
ColumnPtr & col_to = to.getByPosition(column_no).column;
|
||||
/// If there is no column, then the exception was thrown in the middle of append, in the insertRangeFrom()
|
||||
if (!col_to)
|
||||
{
|
||||
col_to = std::move(last_col);
|
||||
/// Suppress clang-tidy [bugprone-use-after-move]
|
||||
last_col = {};
|
||||
}
|
||||
/// But if there is still nothing, abort
|
||||
if (!col_to)
|
||||
throw Exception("No column to rollback", ErrorCodes::LOGICAL_ERROR);
|
||||
if (col_to->size() != old_rows)
|
||||
col_to = col_to->cut(0, old_rows);
|
||||
}
|
||||
|
@ -1124,41 +1124,71 @@ ActionLock StorageMergeTree::stopMergesAndWait()
|
||||
}
|
||||
|
||||
|
||||
MergeTreeDataPartPtr StorageMergeTree::outdatePart(const String & part_name, bool force)
|
||||
{
|
||||
|
||||
if (force)
|
||||
{
|
||||
/// Forcefully stop merges and make part outdated
|
||||
auto merge_blocker = stopMergesAndWait();
|
||||
auto part = getPartIfExists(part_name, {MergeTreeDataPartState::Committed});
|
||||
if (!part)
|
||||
throw Exception("Part " + part_name + " not found, won't try to drop it.", ErrorCodes::NO_SUCH_DATA_PART);
|
||||
removePartsFromWorkingSet({part}, true);
|
||||
return part;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
/// Wait merges selector
|
||||
std::unique_lock lock(currently_processing_in_background_mutex);
|
||||
|
||||
auto part = getPartIfExists(part_name, {MergeTreeDataPartState::Committed});
|
||||
/// It's okay, part was already removed
|
||||
if (!part)
|
||||
return nullptr;
|
||||
|
||||
/// Part will be "removed" by merge or mutation, it's OK in case of some
|
||||
/// background cleanup processes like removing of empty parts.
|
||||
if (currently_merging_mutating_parts.count(part))
|
||||
return nullptr;
|
||||
|
||||
removePartsFromWorkingSet({part}, true);
|
||||
return part;
|
||||
}
|
||||
}
|
||||
|
||||
void StorageMergeTree::dropPartition(const ASTPtr & partition, bool detach, bool drop_part, const Context & context, bool throw_if_noop)
|
||||
{
|
||||
{
|
||||
/// Asks to complete merges and does not allow them to start.
|
||||
/// This protects against "revival" of data for a removed partition after completion of merge.
|
||||
auto merge_blocker = stopMergesAndWait();
|
||||
|
||||
auto metadata_snapshot = getInMemoryMetadataPtr();
|
||||
|
||||
MergeTreeData::DataPartsVector parts_to_remove;
|
||||
auto metadata_snapshot = getInMemoryMetadataPtr();
|
||||
|
||||
if (drop_part)
|
||||
{
|
||||
String part_name = partition->as<ASTLiteral &>().value.safeGet<String>();
|
||||
auto part = getPartIfExists(part_name, {MergeTreeDataPartState::Committed});
|
||||
|
||||
if (part)
|
||||
parts_to_remove.push_back(part);
|
||||
else if (throw_if_noop)
|
||||
throw Exception("Part " + part_name + " not found, won't try to drop it.", ErrorCodes::NO_SUCH_DATA_PART);
|
||||
else
|
||||
auto part = outdatePart(partition->as<ASTLiteral &>().value.safeGet<String>(), throw_if_noop);
|
||||
/// Nothing to do, part was removed in some different way
|
||||
if (!part)
|
||||
return;
|
||||
|
||||
parts_to_remove.push_back(part);
|
||||
}
|
||||
else
|
||||
{
|
||||
/// Asks to complete merges and does not allow them to start.
|
||||
/// This protects against "revival" of data for a removed partition after completion of merge.
|
||||
auto merge_blocker = stopMergesAndWait();
|
||||
String partition_id = getPartitionIDFromQuery(partition, context);
|
||||
parts_to_remove = getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id);
|
||||
}
|
||||
|
||||
// TODO should we throw an exception if parts_to_remove is empty?
|
||||
removePartsFromWorkingSet(parts_to_remove, true);
|
||||
/// TODO should we throw an exception if parts_to_remove is empty?
|
||||
removePartsFromWorkingSet(parts_to_remove, true);
|
||||
}
|
||||
|
||||
if (detach)
|
||||
{
|
||||
/// If DETACH clone parts to detached/ directory
|
||||
/// NOTE: no race with background cleanup until we hold pointers to parts
|
||||
for (const auto & part : parts_to_remove)
|
||||
{
|
||||
LOG_INFO(log, "Detaching {}", part->relative_path);
|
||||
|
@ -107,7 +107,7 @@ private:
|
||||
BackgroundMovesExecutor background_moves_executor;
|
||||
|
||||
/// For block numbers.
|
||||
SimpleIncrement increment{0};
|
||||
SimpleIncrement increment;
|
||||
|
||||
/// For clearOldParts, clearOldTemporaryDirectories.
|
||||
AtomicStopwatch time_after_previous_cleanup;
|
||||
@ -135,6 +135,10 @@ private:
|
||||
*/
|
||||
bool merge(bool aggressive, const String & partition_id, bool final, bool deduplicate, const Names & deduplicate_by_columns, String * out_disable_reason = nullptr, bool optimize_skip_merged_partitions = false);
|
||||
|
||||
/// Make part state outdated and queue it to remove without timeout
|
||||
/// If force, then stop merges and block them until the part state becomes outdated. Throw an exception if the part doesn't exist.
|
||||
/// If not force, then take merges selector and check that part is not participating in background operations.
|
||||
MergeTreeDataPartPtr outdatePart(const String & part_name, bool force);
|
||||
ActionLock stopMergesAndWait();
|
||||
|
||||
/// Allocate block number for new mutation, write mutation to disk
|
||||
|
@ -33,12 +33,24 @@ namespace ErrorCodes
|
||||
|
||||
namespace
|
||||
{
|
||||
const pid_t expected_pid = getpid();
|
||||
// Initialized in StorageSystemStackTrace's ctor and used in signalHandler.
|
||||
std::atomic<pid_t> expected_pid;
|
||||
const int sig = SIGRTMIN;
|
||||
|
||||
std::atomic<int> sequence_num = 0; /// For messages sent via pipe.
|
||||
std::atomic<int> data_ready_num = 0;
|
||||
std::atomic<bool> signal_latch = false; /// Only need for thread sanitizer.
|
||||
|
||||
std::optional<StackTrace> stack_trace;
|
||||
/** Notes:
|
||||
* Only one query from the table can be processed at the moment of time.
|
||||
* This is ensured by the mutex in fillData function.
|
||||
* We obtain information about threads by sending signal and receiving info from the signal handler.
|
||||
* Information is passed via global variables and pipe is used for signaling.
|
||||
* Actually we can send all information via pipe, but we read from it with timeout just in case,
|
||||
* so it's convenient to use it only for signaling.
|
||||
*/
|
||||
|
||||
StackTrace stack_trace{NoCapture{}};
|
||||
|
||||
constexpr size_t max_query_id_size = 128;
|
||||
char query_id_data[max_query_id_size];
|
||||
@ -56,25 +68,34 @@ namespace
|
||||
return;
|
||||
|
||||
/// Signal received too late.
|
||||
if (info->si_value.sival_int != sequence_num.load(std::memory_order_relaxed))
|
||||
int notification_num = info->si_value.sival_int;
|
||||
if (notification_num != sequence_num.load(std::memory_order_acquire))
|
||||
return;
|
||||
|
||||
bool expected = false;
|
||||
if (!signal_latch.compare_exchange_strong(expected, true, std::memory_order_acquire))
|
||||
return;
|
||||
|
||||
/// All these methods are signal-safe.
|
||||
const ucontext_t signal_context = *reinterpret_cast<ucontext_t *>(context);
|
||||
stack_trace.emplace(signal_context);
|
||||
stack_trace = StackTrace(signal_context);
|
||||
|
||||
StringRef query_id = CurrentThread::getQueryId();
|
||||
query_id_size = std::min(query_id.size, max_query_id_size);
|
||||
if (query_id.data && query_id.size)
|
||||
memcpy(query_id_data, query_id.data, query_id_size);
|
||||
|
||||
int notification_num = info->si_value.sival_int;
|
||||
/// This is unneeded (because we synchronize through pipe) but makes TSan happy.
|
||||
data_ready_num.store(notification_num, std::memory_order_release);
|
||||
|
||||
ssize_t res = ::write(notification_pipe.fds_rw[1], ¬ification_num, sizeof(notification_num));
|
||||
|
||||
/// We cannot do anything if write failed.
|
||||
(void)res;
|
||||
|
||||
errno = saved_errno;
|
||||
|
||||
signal_latch.store(false, std::memory_order_release);
|
||||
}
|
||||
|
||||
/// Wait for data in pipe and read it.
|
||||
@ -132,7 +153,7 @@ StorageSystemStackTrace::StorageSystemStackTrace(const StorageID & table_id_)
|
||||
notification_pipe.open();
|
||||
|
||||
/// Setup signal handler.
|
||||
|
||||
expected_pid = getpid();
|
||||
struct sigaction sa{};
|
||||
sa.sa_sigaction = signalHandler;
|
||||
sa.sa_flags = SA_SIGINFO;
|
||||
@ -179,7 +200,7 @@ void StorageSystemStackTrace::fillData(MutableColumns & res_columns, const Conte
|
||||
pid_t tid = parse<pid_t>(it->path().filename());
|
||||
|
||||
sigval sig_value{};
|
||||
sig_value.sival_int = sequence_num.load(std::memory_order_relaxed);
|
||||
sig_value.sival_int = sequence_num.load(std::memory_order_acquire);
|
||||
if (0 != ::sigqueue(tid, sig, sig_value))
|
||||
{
|
||||
/// The thread may have already finished.
|
||||
@ -191,15 +212,15 @@ void StorageSystemStackTrace::fillData(MutableColumns & res_columns, const Conte
|
||||
|
||||
/// Just in case we will wait for pipe with timeout. In case signal didn't get processed.
|
||||
|
||||
if (wait(100))
|
||||
if (wait(100) && sig_value.sival_int == data_ready_num.load(std::memory_order_acquire))
|
||||
{
|
||||
size_t stack_trace_size = stack_trace->getSize();
|
||||
size_t stack_trace_offset = stack_trace->getOffset();
|
||||
size_t stack_trace_size = stack_trace.getSize();
|
||||
size_t stack_trace_offset = stack_trace.getOffset();
|
||||
|
||||
Array arr;
|
||||
arr.reserve(stack_trace_size - stack_trace_offset);
|
||||
for (size_t i = stack_trace_offset; i < stack_trace_size; ++i)
|
||||
arr.emplace_back(reinterpret_cast<intptr_t>(stack_trace->getFramePointers()[i]));
|
||||
arr.emplace_back(reinterpret_cast<intptr_t>(stack_trace.getFramePointers()[i]));
|
||||
|
||||
res_columns[0]->insert(tid);
|
||||
res_columns[1]->insertData(query_id_data, query_id_size);
|
||||
@ -214,7 +235,11 @@ void StorageSystemStackTrace::fillData(MutableColumns & res_columns, const Conte
|
||||
res_columns[2]->insertDefault();
|
||||
}
|
||||
|
||||
++sequence_num; /// FYI: For signed Integral types, arithmetic is defined to use two’s complement representation. There are no undefined results.
|
||||
/// Signed integer overflow is undefined behavior in both C and C++. However, according to
|
||||
/// C++ standard, Atomic signed integer arithmetic is defined to use two's complement; there
|
||||
/// are no undefined results. See https://en.cppreference.com/w/cpp/atomic/atomic and
|
||||
/// http://eel.is/c++draft/atomics.types.generic#atomics.types.int-8
|
||||
++sequence_num;
|
||||
}
|
||||
}
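
An illustrative sketch of the sequence-number handshake tightened above, using a plain worker thread in place of the signal handler: each request carries the current sequence_num, the responder ignores stale notifications and publishes data_ready_num with release ordering, and the requester trusts the result only if data_ready_num matches what it sent. The threading setup below is a simplified assumption, not the actual signal/pipe mechanism.

#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<int> sequence_num{0};
std::atomic<int> data_ready_num{-1};
int payload = 0;   /// data published by the responder (stack trace / query id in the real code)

/// Responder: fills the payload only for the notification it was actually asked about.
void respond(int notification_num)
{
    if (notification_num != sequence_num.load(std::memory_order_acquire))
        return;   /// notification belongs to an older request, ignore it
    payload = notification_num * 100;
    data_ready_num.store(notification_num, std::memory_order_release);
}

int main()
{
    for (int i = 0; i < 3; ++i)
    {
        int expected = sequence_num.load(std::memory_order_acquire);
        std::thread responder(respond, expected);
        responder.join();   /// the real code instead waits on a pipe with a timeout

        if (data_ready_num.load(std::memory_order_acquire) == expected)
            std::printf("request %d answered with payload %d\n", expected, payload);
        else
            std::printf("request %d timed out or was stale\n", expected);

        ++sequence_num;   /// replies to older requests can no longer be mistaken for fresh ones
    }
}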
|
||||
|
||||
|
@ -7,7 +7,7 @@ else ()
|
||||
include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake)
|
||||
endif ()
|
||||
|
||||
install (PROGRAMS clickhouse-test clickhouse-test-server DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
|
||||
install (PROGRAMS clickhouse-test DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
|
||||
install (
|
||||
DIRECTORY queries performance config
|
||||
DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse-test
|
||||
@ -17,30 +17,6 @@ install (
|
||||
PATTERN ".gitignore" EXCLUDE
|
||||
)
|
||||
|
||||
install (FILES server-test.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-server COMPONENT clickhouse)
|
||||
install (FILES client-test.xml DESTINATION ${CLICKHOUSE_ETC_DIR}/clickhouse-client COMPONENT clickhouse)
|
||||
|
||||
if (ENABLE_TESTS)
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake ${ClickHouse_BINARY_DIR})
|
||||
|
||||
# maybe add --no-long ?
|
||||
# if you want disable some tests: env TEST_OPT0='--skip compile'
|
||||
if(TEST_CMAKE_PARALLEL)
|
||||
# NUMBER_OF_LOGICAL_CORES
|
||||
if (TEST_CMAKE_PARALLEL GREATER 1)
|
||||
set(TOTAL_TESTS ${TEST_CMAKE_PARALLEL})
|
||||
else()
|
||||
set(TOTAL_TESTS ${NUMBER_OF_LOGICAL_CORES})
|
||||
endif()
|
||||
foreach(proc RANGE 1 ${TOTAL_TESTS})
|
||||
add_test(NAME with_server${proc} COMMAND bash -c "env BUILD_DIR=${ClickHouse_BINARY_DIR} TEST_OPT0=--parallel=${proc}/${TOTAL_TESTS} ${CMAKE_CURRENT_SOURCE_DIR}/clickhouse-test-server")
|
||||
endforeach()
|
||||
else()
|
||||
add_test(NAME with_server COMMAND bash -c "env BUILD_DIR=${ClickHouse_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/clickhouse-test-server")
|
||||
endif()
|
||||
|
||||
endif ()
|
||||
|
||||
if (ENABLE_TEST_INTEGRATION)
|
||||
add_subdirectory (integration)
|
||||
endif ()
|
||||
|
@ -1,5 +0,0 @@
|
||||
set(CTEST_CUSTOM_TESTS_IGNORE
|
||||
example
|
||||
example64
|
||||
capnp-heavy-tests-run
|
||||
)
|
@ -417,6 +417,18 @@
|
||||
"with_coverage": false
|
||||
}
|
||||
},
|
||||
"Stress test (debug)": {
|
||||
"required_build_properties": {
|
||||
"compiler": "clang-11",
|
||||
"package_type": "deb",
|
||||
"build_type": "debug",
|
||||
"sanitizer": "none",
|
||||
"bundled": "bundled",
|
||||
"splitted": "unsplitted",
|
||||
"clang-tidy": "disable",
|
||||
"with_coverage": false
|
||||
}
|
||||
},
|
||||
"Integration tests (asan)": {
|
||||
"required_build_properties": {
|
||||
"compiler": "clang-11",
|
||||
|
@ -251,7 +251,7 @@ stop_time = None
|
||||
|
||||
# def run_tests_array(all_tests, suite, suite_dir, suite_tmp_dir, run_total):
|
||||
def run_tests_array(all_tests_with_params):
|
||||
all_tests, suite, suite_dir, suite_tmp_dir, run_total = all_tests_with_params
|
||||
all_tests, suite, suite_dir, suite_tmp_dir = all_tests_with_params
|
||||
global exit_code
|
||||
global SERVER_DIED
|
||||
global stop_time
|
||||
@ -571,6 +571,8 @@ def main(args):
|
||||
Note: if you are using unbundled mode, you also have to specify -c option.")
|
||||
|
||||
build_flags = collect_build_flags(args.client)
|
||||
if args.antlr:
|
||||
build_flags.append('antlr')
|
||||
|
||||
if args.use_skip_list:
|
||||
tests_to_skip_from_list = collect_tests_to_skip(args.skip_list_path, build_flags)
|
||||
@ -688,7 +690,7 @@ def main(args):
|
||||
prefix, suffix = item.split('_', 1)
|
||||
|
||||
try:
|
||||
return reverse * int(prefix), suffix
|
||||
return reverse * int(prefix)
|
||||
except ValueError:
|
||||
return 99997
|
||||
|
||||
@ -698,6 +700,7 @@ def main(args):
|
||||
all_tests = [t for t in all_tests if any([re.search(r, t) for r in args.test])]
|
||||
all_tests.sort(key=key_func)
|
||||
|
||||
jobs = args.jobs
|
||||
parallel_tests = []
|
||||
sequential_tests = []
|
||||
for test in all_tests:
|
||||
@ -706,35 +709,32 @@ def main(args):
|
||||
else:
|
||||
parallel_tests.append(test)
|
||||
|
||||
print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests")
|
||||
run_n, run_total = args.parallel.split('/')
|
||||
run_n = float(run_n)
|
||||
run_total = float(run_total)
|
||||
tests_n = len(parallel_tests)
|
||||
if run_total > tests_n:
|
||||
run_total = tests_n
|
||||
if run_n > run_total:
|
||||
continue
|
||||
if jobs > 1 and len(parallel_tests) > 0:
|
||||
print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests")
|
||||
run_n, run_total = args.parallel.split('/')
|
||||
run_n = float(run_n)
|
||||
run_total = float(run_total)
|
||||
tests_n = len(parallel_tests)
|
||||
if run_total > tests_n:
|
||||
run_total = tests_n
|
||||
|
||||
jobs = args.jobs
|
||||
if jobs > tests_n:
|
||||
jobs = tests_n
|
||||
if jobs > run_total:
|
||||
run_total = jobs
|
||||
if jobs > tests_n:
|
||||
jobs = tests_n
|
||||
if jobs > run_total:
|
||||
run_total = jobs
|
||||
|
||||
batch_size = len(parallel_tests) // jobs
|
||||
parallel_tests_array = []
|
||||
for i in range(0, len(parallel_tests), batch_size):
|
||||
parallel_tests_array.append((parallel_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
|
||||
batch_size = len(parallel_tests) // jobs
|
||||
parallel_tests_array = []
|
||||
for i in range(0, len(parallel_tests), batch_size):
|
||||
parallel_tests_array.append((parallel_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir))
|
||||
|
||||
if jobs > 1:
|
||||
with closing(multiprocessing.Pool(processes=jobs)) as pool:
|
||||
pool.map(run_tests_array, parallel_tests_array)
|
||||
|
||||
run_tests_array((sequential_tests, suite, suite_dir, suite_tmp_dir, run_total))
|
||||
run_tests_array((sequential_tests, suite, suite_dir, suite_tmp_dir))
|
||||
total_tests_run += len(sequential_tests) + len(parallel_tests)
|
||||
else:
|
||||
run_tests_array((all_tests, suite, suite_dir, suite_tmp_dir, run_total))
|
||||
run_tests_array((all_tests, suite, suite_dir, suite_tmp_dir))
|
||||
total_tests_run += len(all_tests)
|
||||
|
||||
if args.hung_check:
|
||||
@ -886,6 +886,7 @@ if __name__ == '__main__':
|
||||
group=parser.add_mutually_exclusive_group(required=False)
|
||||
group.add_argument('--shard', action='store_true', default=None, dest='shard', help='Run sharding related tests (required to clickhouse-server listen 127.0.0.2 127.0.0.3)')
|
||||
group.add_argument('--no-shard', action='store_false', default=None, dest='shard', help='Do not run shard related tests')
|
||||
group.add_argument('--antlr', action='store_true', default=False, dest='antlr', help='Use new ANTLR parser in tests')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
@ -965,6 +966,9 @@ if __name__ == '__main__':
|
||||
|
||||
os.environ['CLICKHOUSE_URL_PARAMS'] += get_additional_client_options_url(args)
|
||||
|
||||
if args.antlr:
|
||||
os.environ['CLICKHOUSE_CLIENT_OPT'] += ' --use_antlr_parser=1'
|
||||
|
||||
if args.extract_from_config is None:
|
||||
if os.access(args.binary + '-extract-from-config', os.X_OK):
|
||||
args.extract_from_config = args.binary + '-extract-from-config'
|
||||
|
@ -1,4 +1,4 @@
|
||||
<!-- Config for connecting to test server -->
|
||||
<!-- Config for connecting to test server in Arcadia -->
|
||||
<yandex>
|
||||
<tcp_port>59000</tcp_port>
|
||||
<tcp_port_secure>59440</tcp_port_secure>
|
||||
|
36
tests/performance/ColumnMap.xml
Normal file
@ -0,0 +1,36 @@
|
||||
<test>
|
||||
|
||||
<settings>
|
||||
<allow_experimental_map_type>1</allow_experimental_map_type>
|
||||
</settings>
|
||||
|
||||
<substitutions>
|
||||
<substitution>
|
||||
<name>key_suffix</name>
|
||||
<values>
|
||||
<value>''</value>
|
||||
<value>'-miss'</value>
|
||||
</values>
|
||||
</substitution>
|
||||
</substitutions>
|
||||
|
||||
<create_query>
|
||||
CREATE TABLE column_map_test
|
||||
ENGINE = MergeTree ORDER BY number
|
||||
AS
|
||||
SELECT number, map
|
||||
FROM
|
||||
(
|
||||
SELECT
|
||||
number,
|
||||
arrayMap(x -> toString(x), range(100)) AS keys,
|
||||
arrayMap(x -> toString(x * x), range(100)) AS values,
|
||||
cast((keys, values), 'Map(String, String)') AS map
|
||||
FROM numbers(10000)
|
||||
)
|
||||
</create_query>
|
||||
|
||||
<query>SELECT count() FROM column_map_test WHERE NOT ignore(arrayMap(x -> map[CONCAT(toString(x), {key_suffix})], range(0, 100, 10)))</query>
|
||||
|
||||
<drop_query>DROP TABLE IF EXISTS column_map_test</drop_query>
|
||||
</test>
|
@ -1,54 +0,0 @@
|
||||
#!/usr/bin/env perl
|
||||
package parquet_create_table_columns;
|
||||
use strict;
|
||||
no warnings 'experimental';
|
||||
use feature 'signatures';
|
||||
use JSON::XS;
|
||||
#use Data::Dumper;
|
||||
|
||||
sub file_read($file) {
|
||||
open my $f, '<', $file or return;
|
||||
local $/ = undef;
|
||||
my $ret = <$f>;
|
||||
close $f;
|
||||
return $ret;
|
||||
}
|
||||
|
||||
our $type_parquet_logical_to_clickhouse = {
|
||||
DECIMAL => 'Decimal128(1)',
|
||||
TIMESTAMP_MICROS => 'DateTime',
|
||||
TIMESTAMP_MILLIS => 'DateTime',
|
||||
};
|
||||
our $type_parquet_physical_to_clickhouse = {
|
||||
BOOLEAN => 'UInt8',
|
||||
INT32 => 'Int32',
|
||||
INT64 => 'Int64',
|
||||
FLOAT => 'Float32',
|
||||
DOUBLE => 'Float64',
|
||||
BYTE_ARRAY => 'String',
|
||||
FIXED_LEN_BYTE_ARRAY => 'String', # Maybe FixedString?
|
||||
INT96 => 'Int64', # TODO!
|
||||
};
|
||||
|
||||
sub columns ($json) {
|
||||
my @list;
|
||||
my %uniq;
|
||||
for my $column (@{$json->{Columns}}) {
|
||||
#warn Data::Dumper::Dumper $column;
|
||||
my $name = $column->{'Name'};
|
||||
my $type = $type_parquet_logical_to_clickhouse->{$column->{'LogicalType'}} || $type_parquet_physical_to_clickhouse->{$column->{'PhysicalType'}};
|
||||
unless ($type) {
|
||||
warn "Unknown type [$column->{'PhysicalType'}:$column->{'LogicalType'}] of column [$name]";
|
||||
}
|
||||
$type = "Nullable($type)";
|
||||
$name .= $column->{'Id'} if $uniq{$name}++; # Names can be non-unique
|
||||
push @list, {name => $name, type => $type};
|
||||
}
|
||||
print join ', ', map {"`$_->{name}` $_->{type}"} @list;
|
||||
}
|
||||
|
||||
sub columns_file ($file) {
|
||||
return columns(JSON::XS::decode_json(file_read($file)));
|
||||
}
|
||||
|
||||
columns_file(shift) unless caller;
|
@ -13,134 +13,220 @@
|
||||
=== Try load data from alltypes_plain.snappy.parquet
|
||||
6 1 0 0 0 0 0 0 04/01/09 0 1238544000
|
||||
7 0 1 1 1 10 1.1 10.1 04/01/09 1 1238544060
|
||||
=== Try load data from binary.parquet
|
||||
\0
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
\b
|
||||
\t
|
||||
\n
|
||||
|
||||
=== Try load data from byte_array_decimal.parquet
|
||||
1.0
|
||||
2.0
|
||||
3.0
|
||||
4.0
|
||||
5.0
|
||||
6.0
|
||||
7.0
|
||||
8.0
|
||||
9.0
|
||||
10.0
|
||||
11.0
|
||||
12.0
|
||||
13.0
|
||||
14.0
|
||||
15.0
|
||||
16.0
|
||||
17.0
|
||||
18.0
|
||||
19.0
|
||||
20.0
|
||||
21.0
|
||||
22.0
|
||||
23.0
|
||||
24.0
|
||||
1.00
|
||||
2.00
|
||||
3.00
|
||||
4.00
|
||||
5.00
|
||||
6.00
|
||||
7.00
|
||||
8.00
|
||||
9.00
|
||||
10.00
|
||||
11.00
|
||||
12.00
|
||||
13.00
|
||||
14.00
|
||||
15.00
|
||||
16.00
|
||||
17.00
|
||||
18.00
|
||||
19.00
|
||||
20.00
|
||||
21.00
|
||||
22.00
|
||||
23.00
|
||||
24.00
|
||||
=== Try load data from datapage_v2.snappy.parquet
|
||||
Code: 33. DB::Ex---tion: Error while reading Parquet data: IOError: Not yet implemented: Unsupported encoding.: data for INSERT was parsed from stdin
|
||||
|
||||
=== Try load data from dict-page-offset-zero.parquet
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
||||
1552
|
1552
1552
1552
1552
1552
1552
1552
1552
1552
1552
1552
1552
1552
1552
=== Try load data from fixed_length_decimal.parquet
1.00
2.00
3.00
4.00
5.00
6.00
7.00
8.00
9.00
10.00
11.00
12.00
13.00
14.00
15.00
16.00
17.00
18.00
19.00
20.00
21.00
22.00
23.00
24.00
=== Try load data from fixed_length_decimal_1.parquet
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
10.0
11.0
12.0
13.0
14.0
15.0
16.0
17.0
18.0
19.0
20.0
21.0
22.0
23.0
24.0
1.00
2.00
3.00
4.00
5.00
6.00
7.00
8.00
9.00
10.00
11.00
12.00
13.00
14.00
15.00
16.00
17.00
18.00
19.00
20.00
21.00
22.00
23.00
24.00
=== Try load data from fixed_length_decimal_legacy.parquet
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
10.0
11.0
12.0
13.0
14.0
15.0
16.0
17.0
18.0
19.0
20.0
21.0
22.0
23.0
24.0
1.00
2.00
3.00
4.00
5.00
6.00
7.00
8.00
9.00
10.00
11.00
12.00
13.00
14.00
15.00
16.00
17.00
18.00
19.00
20.00
21.00
22.00
23.00
24.00
=== Try load data from hadoop_lz4_compressed.parquet
1593604800 abc 42
1593604800 def 7.7
1593604801 abc 42.125
1593604801 def 7.7
=== Try load data from int32_decimal.parquet
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
10.0
11.0
12.0
13.0
14.0
15.0
16.0
17.0
18.0
19.0
20.0
21.0
22.0
23.0
24.0
1.00
2.00
3.00
4.00
5.00
6.00
7.00
8.00
9.00
10.00
11.00
12.00
13.00
14.00
15.00
16.00
17.00
18.00
19.00
20.00
21.00
22.00
23.00
24.00
=== Try load data from int64_decimal.parquet
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
10.0
11.0
12.0
13.0
14.0
15.0
16.0
17.0
18.0
19.0
20.0
21.0
22.0
23.0
24.0
1.00
2.00
3.00
4.00
5.00
6.00
7.00
8.00
9.00
10.00
11.00
12.00
13.00
14.00
15.00
16.00
17.00
18.00
19.00
20.00
21.00
22.00
23.00
24.00
=== Try load data from list_columns.parquet
Code: 70. DB::Ex---tion: The type "list" of an input column "int64_list" is not supported for conversion from a Parquet data format: data for INSERT was parsed from stdin

=== Try load data from nation.dict-malformed.parquet
0 ALGERIA 0 haggle. carefully final deposits detect slyly agai
1 ARGENTINA 1 al foxes promise slyly according to the regular accounts. bold requests alon
@ -168,23 +254,25 @@ Code: 33. DB::Ex---tion: Error while reading Parquet data: IOError: Not yet impl
23 UNITED KINGDOM 3 eans boost carefully special requests. accounts are. carefull
24 UNITED STATES 1 y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be
=== Try load data from nested_lists.snappy.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin
Code: 70. DB::Ex---tion: The type "list" of an input column "a" is not supported for conversion from a Parquet data format: data for INSERT was parsed from stdin

=== Try load data from nested_maps.snappy.parquet
Code: 33. DB::Ex---tion: Error while reading Parquet data: NotImplemented: Reading lists of structs from Parquet files not yet supported: key_value: list<key_value: struct<key: string not null, value: struct<key_value: list<key_value: struct<key: int32 not null, value: bool not null> not null> not null>> not null> not null: data for INSERT was parsed from stdin
Code: 70. DB::Ex---tion: The type "map" of an input column "a" is not supported for conversion from a Parquet data format: data for INSERT was parsed from stdin

=== Try load data from non_hadoop_lz4_compressed.parquet
1593604800 abc 42
1593604800 def 7.7
1593604801 abc 42.125
1593604801 def 7.7
=== Try load data from nonnullable.impala.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin

../contrib/arrow/cpp/src/arrow/array/array_nested.cc:192: Check failed: (self->list_type_->value_type()->id()) == (data->child_data[0]->type->id())
=== Try load data from nullable.impala.parquet
Code: 8. DB::Ex---tion: Column "element" is not presented in input data: data for INSERT was parsed from stdin

../contrib/arrow/cpp/src/arrow/array/array_nested.cc:192: Check failed: (self->list_type_->value_type()->id()) == (data->child_data[0]->type->id())
=== Try load data from nulls.snappy.parquet
Code: 8. DB::Ex---tion: Column "b_c_int" is not presented in input data: data for INSERT was parsed from stdin

=== Try load data from repeated_no_annotation.parquet
Code: 8. DB::Ex---tion: Column "number" is not presented in input data: data for INSERT was parsed from stdin
Code: 70. DB::Ex---tion: The type "struct" of an input column "b_struct" is not supported for conversion from a Parquet data format: data for INSERT was parsed from stdin

=== Try load data from single_nan.parquet
\N
=== Try load data from userdata1.parquet
1454486129 1 Amanda Jordan ajordan0@com.com Female 1.197.201.2 6759521864920116 Indonesia 3/8/1971 49756.53 Internal Auditor 1E+02
1454519043 2 Albert Freeman afreeman1@is.gd Male 218.111.175.34 Canada 1/16/1968 150280.17 Accountant IV
@ -5,8 +5,6 @@
# TODO: Add more files.
#

# To regenerate data install perl JSON::XS module: sudo apt install libjson-xs-perl

# Also 5 sample files from
# wget https://github.com/Teradata/kylo/raw/master/samples/sample-data/parquet/userdata1.parquet
# ...
@ -19,38 +17,46 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh

CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY")
[ "$CB_DIR" == "." ] && ROOT_DIR=$CUR_DIR/../../../..
[ "$CB_DIR" != "." ] && BUILD_DIR=$CB_DIR/../..
[ -z "$ROOT_DIR" ] && ROOT_DIR=$CB_DIR/../../..
[ "$CB_DIR" == "." ] && ROOT_DIR=$CUR_DIR/../../..
[ -z "$ROOT_DIR" ] && ROOT_DIR=$CB_DIR/../..

DATA_DIR=$CUR_DIR/data_parquet

[ -n "$ROOT_DIR" ] && [ -z "$PARQUET_READER" ] && PARQUET_READER="$ROOT_DIR"/contrib/arrow/cpp/build/release/parquet-reader

# To update:
# cp $ROOT_DIR/contrib/arrow/cpp/submodules/parquet-testing/data/*.parquet $ROOT_DIR/contrib/arrow/python/pyarrow/tests/data/parquet/*.parquet $CUR_DIR/data_parquet/

# BUG! nulls.snappy.parquet - parquet-reader shows wrong structure. Actual structure is {"type":"struct","fields":[{"name":"b_struct","type":{"type":"struct","fields":[{"name":"b_c_int","type":"integer","nullable":true,"metadata":{}}]},"nullable":true,"metadata":{}}]}
# why? repeated_no_annotation.parquet
# ClickHouse Parquet reader doesn't support such complex types, so I didn't burrow into the issue.
# There is failure due parsing nested arrays or nested maps with NULLs:
# ../contrib/arrow/cpp/src/arrow/array/array_nested.cc:192: Check failed: (self->list_type_->value_type()->id()) == (data->child_data[0]->type->id())

for NAME in $(find "$DATA_DIR"/*.parquet -print0 | xargs -0 -n 1 basename | sort); do
# Strange behaviour for repeated_no_annotation.parquet around __buitin_expect, so this file was disabled:
# debug:
# ../contrib/arrow/cpp/src/arrow/array/array_nested.cc:193: Check failed: self->list_type_->value_type()->Equals(data->child_data[0]->type)
# release:
# Code: 349. DB::Ex---tion: Can not insert NULL data into non-nullable column "phoneNumbers": data for INSERT was parsed from stdin

for NAME in $(find "$DATA_DIR"/*.parquet -print0 | xargs -0 -n 1 basename | LC_ALL=C sort); do
echo === Try load data from "$NAME"

JSON=$DATA_DIR/$NAME.json
COLUMNS_FILE=$DATA_DIR/$NAME.columns

# If you want change or add .parquet file - rm data_parquet/*.json data_parquet/*.columns
[ -n "$BUILD_DIR" ] && [ ! -s "$COLUMNS_FILE" ] && [ ! -s "$JSON" ] && "$BUILD_DIR"/contrib/arrow-cmake/parquet-reader --json "$DATA_DIR"/"$NAME" > "$JSON"
[ -n "$BUILD_DIR" ] && [ ! -s "$COLUMNS_FILE" ] && "$CUR_DIR"/00900_parquet_create_table_columns.pl "$JSON" > "$COLUMNS_FILE"
[ -n "$PARQUET_READER" ] && [ ! -s "$COLUMNS_FILE" ] && [ ! -s "$JSON" ] && "$PARQUET_READER" --json "$DATA_DIR"/"$NAME" > "$JSON"
[ ! -s "$COLUMNS_FILE" ] && "$CUR_DIR"/helpers/00900_parquet_create_table_columns.py "$JSON" > "$COLUMNS_FILE"

# Debug only:
# [ -n "$BUILD_DIR" ] && $BUILD_DIR/contrib/arrow-cmake/parquet-reader $DATA_DIR/$NAME > $DATA_DIR/$NAME.dump
# [ -n "$PARQUET_READER" ] && $PARQUET_READER $DATA_DIR/$NAME > $DATA_DIR/$NAME.dump

#COLUMNS=`$CUR_DIR/00900_parquet_create_table_columns.pl $JSON` 2>&1 || continue
# COLUMNS=`$CUR_DIR/00900_parquet_create_table_columns.py $JSON` 2>&1 || continue
COLUMNS=$(cat "$COLUMNS_FILE") || continue

${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load ($COLUMNS) ENGINE = Memory"

# Some files is broken, exception is ok.
# Some files contain unsupported data structures, exception is ok.
cat "$DATA_DIR"/"$NAME" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_load FORMAT Parquet" 2>&1 | sed 's/Exception/Ex---tion/'

${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load LIMIT 100"
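For reference, the load pattern this test exercises can be reproduced by hand. The sketch below is an illustration only: it assumes a running server reachable by clickhouse-client and a local file example.parquet with a single UInt64 column n, both of which are hypothetical and not part of this commit.

#!/usr/bin/env bash
# Minimal sketch of the INSERT ... FORMAT Parquet round trip used by the test above.
clickhouse-client --query="DROP TABLE IF EXISTS parquet_demo"
clickhouse-client --query="CREATE TABLE parquet_demo (n UInt64) ENGINE = Memory"
# Feed the Parquet file to the server on stdin, exactly as the test does with cat | INSERT.
cat example.parquet | clickhouse-client --query="INSERT INTO parquet_demo FORMAT Parquet"
clickhouse-client --query="SELECT count() FROM parquet_demo"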
@ -0,0 +1 @@
1
2
tests/queries/0_stateless/01051_system_stack_trace.sql
Normal file
@ -0,0 +1,2 @@
-- at least this query should be present
SELECT count() > 0 FROM system.stack_trace WHERE query_id != '';
@ -103,8 +103,10 @@ done
sleep 1

counter=0
have_undone_mutations_query="select * from system.mutations where table like 'concurrent_mutate_mt_%' and is_done=0 and database='${CLICKHOUSE_DATABASE}'"
have_all_tables_query="select count() FROM system.tables WHERE name LIKE 'concurrent_mutate_mt_%' and database='${CLICKHOUSE_DATABASE}'"

while [[ $($CLICKHOUSE_CLIENT --query "select * from system.mutations where table like 'concurrent_mutate_mt_%' and is_done=0" 2>&1) ]]; do
while true ; do
if [ "$counter" -gt 120 ]
then
break
@ -113,7 +115,13 @@ while [[ $($CLICKHOUSE_CLIENT --query "select * from system.mutations where tabl
for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "ATTACH TABLE concurrent_mutate_mt_$i" 2> /dev/null
done

counter=$(($counter + 1))

# no active mutations and all tables attached
if [[ -z $($CLICKHOUSE_CLIENT --query "$have_undone_mutations_query" 2>&1) && $($CLICKHOUSE_CLIENT --query "$have_all_tables_query" 2>&1) == "$REPLICAS" ]]; then
break
fi
done

for i in $(seq $REPLICAS); do
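The change above replaces an open-ended wait with a bounded poll: loop until system.mutations reports no unfinished mutations and all replica tables are attached, or until a counter runs out. A standalone sketch of the same idea, with a placeholder table pattern rather than the test's:

#!/usr/bin/env bash
counter=0
while true; do
    # Give up after ~120 iterations so the script cannot hang forever.
    if [ "$counter" -gt 120 ]; then break; fi
    pending=$(clickhouse-client --query "SELECT count() FROM system.mutations WHERE table LIKE 'my_table_%' AND is_done = 0")
    if [ "$pending" -eq 0 ]; then break; fi
    counter=$((counter + 1))
    sleep 1
done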
@ -2,4 +2,6 @@ Instruction check fail. The CPU does not support SSSE3 instruction set.
Instruction check fail. The CPU does not support SSE4.1 instruction set.
Instruction check fail. The CPU does not support SSE4.2 instruction set.
Instruction check fail. The CPU does not support POPCNT instruction set.
MADV_DONTNEED does not zeroed page. jemalloc will be broken
<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)
<jemalloc>: (This is the expected behaviour if you are running under QEMU)
1
@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)
yes 'SELECT 1' 2>/dev/null | {
head -n1000
} | {
xargs -i ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}'
xargs -I{} ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d '{}'
} | grep -x -c 1

wait
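The functional change here is only xargs -i to xargs -I{} (the lowercase form is deprecated in GNU findutils). For context, a single request of the kind the loop issues looks roughly like this; the URL and limit are placeholders, not the test's variables:

#!/usr/bin/env bash
# Send one query over HTTP with a 1 GiB per-user memory limit; wait_end_of_query=1 defers the
# HTTP response until the query has finished, so failures are reported reliably.
curl -sS "http://localhost:8123/?wait_end_of_query=1&max_memory_usage_for_user=$((1<<30))" -d 'SELECT 1'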
@ -20,9 +20,7 @@ attempt to parse with input_format_allow_errors_ratio=0.3
1 0
2 0
3 0
4 0
5 0
6 0
Return code: 0
******************
attempt to parse with input_format_allow_errors_num=1
@ -34,7 +32,5 @@ attempt to parse with input_format_allow_errors_num=2
1 0
2 0
3 0
4 0
5 0
6 0
Return code: 0
@ -11,34 +11,34 @@ cat "$SAMPLE_FILE"

echo '******************'
echo 'attempt to parse w/o flags'
cat "$SAMPLE_FILE" | clickhouse-local --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' 2>"$STD_ERROR_CAPTURED"
cat "$SAMPLE_FILE" | ${CLICKHOUSE_LOCAL} --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' 2>"$STD_ERROR_CAPTURED"
echo "Return code: $?"
expected_error_message='is not like Int64'
cat "$STD_ERROR_CAPTURED" | grep -q "$expected_error_message" && echo "OK: stderr contains a message '$expected_error_message'" || echo "FAILED: Error message is wrong"

echo '******************'
echo 'attempt to parse with input_format_allow_errors_ratio=0.1'
cat "$SAMPLE_FILE" | clickhouse-local --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_ratio=0.1 2>"$STD_ERROR_CAPTURED"
cat "$SAMPLE_FILE" | ${CLICKHOUSE_LOCAL} --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_ratio=0.1 2>"$STD_ERROR_CAPTURED"
echo "Return code: $?"
expected_error_message='Already have 1 errors out of 5 rows, which is 0.2'
cat "$STD_ERROR_CAPTURED" | grep -q "$expected_error_message" && echo "OK: stderr contains a message '$expected_error_message'" || echo "FAILED: Error message is wrong"

echo '******************'
echo 'attempt to parse with input_format_allow_errors_ratio=0.3'
cat "$SAMPLE_FILE" | clickhouse-local --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_ratio=0.3 2>"$STD_ERROR_CAPTURED"
cat "$SAMPLE_FILE" | ${CLICKHOUSE_LOCAL} --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_ratio=0.3 2>"$STD_ERROR_CAPTURED"
echo "Return code: $?"
cat "$STD_ERROR_CAPTURED"

echo '******************'
echo 'attempt to parse with input_format_allow_errors_num=1'
cat "$SAMPLE_FILE" | clickhouse-local --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_num=1 2>"$STD_ERROR_CAPTURED"
cat "$SAMPLE_FILE" | ${CLICKHOUSE_LOCAL} --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_num=1 2>"$STD_ERROR_CAPTURED"
echo "Return code: $?"
expected_error_message='Already have 2 errors out of 7 rows'
cat "$STD_ERROR_CAPTURED" | grep -q "$expected_error_message" && echo "OK: stderr contains a message '$expected_error_message'" || echo "FAILED: Error message is wrong"

echo '******************'
echo 'attempt to parse with input_format_allow_errors_num=2'
cat "$SAMPLE_FILE" | clickhouse-local --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_num=2 2>"$STD_ERROR_CAPTURED"
cat "$SAMPLE_FILE" | ${CLICKHOUSE_LOCAL} --input-format=CSV --structure='num1 Int64, num2 Int64' --query='SELECT * from table' --input_format_allow_errors_num=2 2>"$STD_ERROR_CAPTURED"
echo "Return code: $?"
cat "$STD_ERROR_CAPTURED"
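As a quick illustration of the settings exercised above, a malformed CSV row can be tolerated with input_format_allow_errors_num; the inline data below is invented for the example and is not the test's sample file:

#!/usr/bin/env bash
# Two good rows and one bad one; allowing a single error lets the query succeed.
printf '1,2\nbroken,row\n3,4\n' | clickhouse-local \
    --input-format=CSV --structure='num1 Int64, num2 Int64' \
    --input_format_allow_errors_num=1 \
    --query='SELECT * FROM table'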
@ -5,7 +5,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none
. "$CURDIR"/../shell_config.sh


function test()
function test_func()
{
ENGINE=$1
MAX_MEM=4096
@ -32,9 +32,9 @@ function test()
$CLICKHOUSE_CLIENT --query "DROP TABLE log";
}

test TinyLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'
test StripeLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'
test Log | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'
test_func TinyLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'
test_func StripeLog | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'
test_func Log | grep -v -P '^(Memory limit|0\t0|File not found|[1-9]000000\t)'

rm "${CLICKHOUSE_TMP}/insert_result"
rm "${CLICKHOUSE_TMP}/select_result"
@ -43,7 +43,7 @@ function check_sticky_mutations()

$CLICKHOUSE_CLIENT --query "SYSTEM START MERGES sticking_mutations"

# just to be sure, that previous mutations finished
# Just to be sure, that previous mutations finished
$CLICKHOUSE_CLIENT --query "ALTER TABLE sticking_mutations DELETE WHERE value2 % 31 == 0 SETTINGS mutations_sync = 1"

$CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE sticking_mutations FINAL"
@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

# Regression for UAF in ThreadPool.
# (Triggered under TSAN)
for i in {1..10}; do
for _ in {1..10}; do
${CLICKHOUSE_LOCAL} -q 'select * from numbers_mt(100000000) settings max_threads=100 FORMAT Null'
# Binding to specific CPU is not required, but this makes the test more reliable.
taskset --cpu-list 0 ${CLICKHOUSE_LOCAL} -q 'select * from numbers_mt(100000000) settings max_threads=100 FORMAT Null'
@ -16,6 +16,6 @@ opts=(
)
${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', if(_shard_num == 2, 100000, 1))), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& {
# the query should fail earlier on 127.3 and 127.2 should not even go to the memory limit exceeded error.
fgrep -q 'DB::Exception: Received from 127.3:9000. DB::Exception: Memory limit (for query) exceeded:'
grep -F -q 'DB::Exception: Received from 127.3:9000. DB::Exception: Memory limit (for query) exceeded:'
# while if this will not correctly then it will got the exception from the 127.2:9000 and fail
}
@ -0,0 +1,3 @@
-1 DateTime64(1, \'UTC\') < 1 1 1 <= 1 1 1 = 0 0 0 >= 0 0 0 > 0 0 0 != 1 1 1
0 DateTime64(1, \'UTC\') < 0 0 0 <= 1 1 1 = 1 1 1 >= 1 1 1 > 0 0 0 != 0 0 0
1 DateTime64(1, \'UTC\') < 0 0 0 <= 0 0 0 = 0 0 0 >= 1 1 1 > 1 1 1 != 1 1 1
@ -0,0 +1,43 @@
SELECT
n,
toTypeName(dt64) AS dt64_typename,

'<',
dt64 < dt,
toDateTime(dt64) < dt,
dt64 < toDateTime64(dt, 1, 'UTC'),

'<=',
dt64 <= dt,
toDateTime(dt64) <= dt,
dt64 <= toDateTime64(dt, 1, 'UTC'),

'=',
dt64 = dt,
toDateTime(dt64) = dt,
dt64 = toDateTime64(dt, 1, 'UTC'),

'>=',
dt64 >= dt,
toDateTime(dt64) >= dt,
dt64 >= toDateTime64(dt, 1, 'UTC'),

'>',
dt64 > dt,
toDateTime(dt64) > dt,
dt64 > toDateTime64(dt, 1, 'UTC'),

'!=',
dt64 != dt,
toDateTime(dt64) != dt,
dt64 != toDateTime64(dt, 1, 'UTC')
FROM
(
WITH toDateTime('2015-05-18 07:40:11') as value
SELECT
number - 1 as n,
toDateTime64(value, 1, 'UTC') AS dt64,
value - n as dt
FROM system.numbers
LIMIT 3
)
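A smaller, self-contained version of the comparison exercised above can be run directly through clickhouse-client; the timestamp below is arbitrary and the exact results are not claimed here:

#!/usr/bin/env bash
# Compare a DateTime64(1) value against a plain DateTime, directly and after explicit casts.
clickhouse-client --query "
WITH toDateTime('2015-05-18 07:40:11') AS value
SELECT
    toDateTime64(value, 1, 'UTC') = value,
    toDateTime(toDateTime64(value, 1, 'UTC')) = value,
    toDateTime64(value, 1, 'UTC') = toDateTime64(value, 1, 'UTC')
"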
@ -0,0 +1,3 @@
-1 DateTime64(1, \'UTC\') < 1 1 1 <= 1 1 1 = 0 0 0 >= 0 0 0 > 0 0 0 != 1 1 1
0 DateTime64(1, \'UTC\') < 0 0 0 <= 0 1 0 = 0 1 0 >= 1 1 1 > 1 0 1 != 1 0 1
1 DateTime64(1, \'UTC\') < 0 0 0 <= 0 0 0 = 0 0 0 >= 1 1 1 > 1 1 1 != 1 1 1
@ -0,0 +1,43 @@
SELECT
n,
toTypeName(dt64) AS dt64_typename,

'<',
dt64 < d,
toDate(dt64) < d,
dt64 < toDateTime64(d, 1, 'UTC'),

'<=',
dt64 <= d,
toDate(dt64) <= d,
dt64 <= toDateTime64(d, 1, 'UTC'),

'=',
dt64 = d,
toDate(dt64) = d,
dt64 = toDateTime64(d, 1, 'UTC'),

'>=',
dt64 >= d,
toDate(dt64) >= d,
dt64 >= toDateTime64(d, 1, 'UTC'),

'>',
dt64 > d,
toDate(dt64) > d,
dt64 > toDateTime64(d, 1, 'UTC'),

'!=',
dt64 != d,
toDate(dt64) != d,
dt64 != toDateTime64(d, 1, 'UTC')
FROM
(
WITH toDateTime('2019-09-16 19:20:11') as val
SELECT
number - 1 as n,
toDateTime64(val, 1, 'UTC') AS dt64,
toDate(val, 'UTC') - n as d
FROM system.numbers
LIMIT 3
)
@ -120,3 +120,7 @@ INSERT INTO partial_duplicates SELECT * FROM source_data;
OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY COLUMNS('.*k');
SELECT * FROM partial_duplicates;
TRUNCATE partial_duplicates;

DROP TABLE full_duplicates;
DROP TABLE partial_duplicates;
DROP TABLE source_data;
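For readers unfamiliar with the syntax being tested, here is a minimal standalone sketch of column-scoped deduplication; the table, columns, and data are illustrative only and no particular surviving row is claimed:

#!/usr/bin/env bash
clickhouse-client --multiquery --query "
DROP TABLE IF EXISTS dedup_demo;
CREATE TABLE dedup_demo (k Int32, v Int32) ENGINE = MergeTree ORDER BY k;
INSERT INTO dedup_demo VALUES (1, 10), (1, 10), (1, 20);
-- Deduplicate considering only the columns matched by the regular expression '.*k'.
OPTIMIZE TABLE dedup_demo FINAL DEDUPLICATE BY COLUMNS('.*k');
SELECT * FROM dedup_demo ORDER BY k, v;
"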
@ -1,47 +1,47 @@
check that we have a data
r1 1 1001 3 2 2
r1 1 2001 1 1 1
r1 2 1002 1 1 1
r1 2 2002 1 1 1
r1 3 1003 2 2 2
r1 4 1004 2 2 2
r1 5 2005 2 2 1
r1 9 1002 1 1 1
r2 1 1001 3 2 2
r2 1 2001 1 1 1
r2 2 1002 1 1 1
r2 2 2002 1 1 1
r2 3 1003 2 2 2
r2 4 1004 2 2 2
r2 5 2005 2 2 1
r2 9 1002 1 1 1
r1 1 1001 3 2
r1 1 2001 1 1
r1 2 1002 1 1
r1 2 2002 1 1
r1 3 1003 2 2
r1 4 1004 2 2
r1 5 2005 2 2
r1 9 1002 1 1
r2 1 1001 3 2
r2 1 2001 1 1
r2 2 1002 1 1
r2 2 2002 1 1
r2 3 1003 2 2
r2 4 1004 2 2
r2 5 2005 2 2
r2 9 1002 1 1
after old OPTIMIZE DEDUPLICATE
r1 1 1001 3 2 2
r1 1 2001 1 1 1
r1 2 1002 1 1 1
r1 2 2002 1 1 1
r1 3 1003 2 2 2
r1 4 1004 2 2 2
r1 5 2005 2 2 1
r1 9 1002 1 1 1
r2 1 1001 3 2 2
r2 1 2001 1 1 1
r2 2 1002 1 1 1
r2 2 2002 1 1 1
r2 3 1003 2 2 2
r2 4 1004 2 2 2
r2 5 2005 2 2 1
r2 9 1002 1 1 1
r1 1 1001 2 2
r1 1 2001 1 1
r1 2 1002 1 1
r1 2 2002 1 1
r1 3 1003 2 2
r1 4 1004 2 2
r1 5 2005 2 2
r1 9 1002 1 1
r2 1 1001 2 2
r2 1 2001 1 1
r2 2 1002 1 1
r2 2 2002 1 1
r2 3 1003 2 2
r2 4 1004 2 2
r2 5 2005 2 2
r2 9 1002 1 1
check data again after multiple deduplications with new syntax
r1 1 1001 1 1 1
r1 2 1002 1 1 1
r1 3 1003 1 1 1
r1 4 1004 1 1 1
r1 5 2005 1 1 1
r1 9 1002 1 1 1
r2 1 1001 1 1 1
r2 2 1002 1 1 1
r2 3 1003 1 1 1
r2 4 1004 1 1 1
r2 5 2005 1 1 1
r2 9 1002 1 1 1
r1 1 1001 1 1
r1 2 1002 1 1
r1 3 1003 1 1
r1 4 1004 1 1
r1 5 2005 1 1
r1 9 1002 1 1
r2 1 1001 1 1
r2 2 1002 1 1
r2 3 1003 1 1
r2 4 1004 1 1
r2 5 2005 1 1
r2 9 1002 1 1
@ -3,57 +3,50 @@
--- replicated case

-- Just in case if previous tests run left some stuff behind.
DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r1;
DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r2;
DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r1 SYNC;
DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r2 SYNC;

SET replication_alter_partitions_sync = 2;

-- IRL insert_replica_id were filled from hostname
CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r1 (
id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock(), insert_replica_id UInt8 MATERIALIZED randConstant()
id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock()
) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01581/replicated_deduplicate', 'r1') ORDER BY id;

CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r2 (
id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock(), insert_replica_id UInt8 MATERIALIZED randConstant()
id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock()
) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01581/replicated_deduplicate', 'r2') ORDER BY id;


SYSTEM STOP REPLICATED SENDS;
SYSTEM STOP FETCHES;
SYSTEM STOP REPLICATION QUEUES;

-- insert some data, 2 records: (3, 1003), (4, 1004) are duplicated and have difference in unique_value / insert_replica_id
-- (1, 1001), (5, 2005) has full duplicates
INSERT INTO replicated_deduplicate_by_columns_r1 VALUES (1, 1001), (1, 1001), (2, 1002), (3, 1003), (4, 1004), (1, 2001), (9, 1002);
INSERT INTO replicated_deduplicate_by_columns_r2 VALUES (1, 1001), (2, 2002), (3, 1003), (4, 1004), (5, 2005), (5, 2005);

SYSTEM START REPLICATION QUEUES;
SYSTEM START FETCHES;
SYSTEM START REPLICATED SENDS;

-- wait for syncing replicas
-- make sure that all data is present on all replicas
SYSTEM SYNC REPLICA replicated_deduplicate_by_columns_r2;
SYSTEM SYNC REPLICA replicated_deduplicate_by_columns_r1;

SELECT 'check that we have a data';
SELECT 'r1', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;
SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;


-- NOTE: here and below we need FINAL to force deduplication in such a small set of data in only 1 part.
-- that should remove full duplicates
OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE;

SELECT 'after old OPTIMIZE DEDUPLICATE';
SELECT 'r1', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;
SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;

OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY id, val;
OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY COLUMNS('[id, val]');
OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY COLUMNS('[i]') EXCEPT(unique_value, insert_replica_id);
OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY COLUMNS('[i]') EXCEPT(unique_value);

SELECT 'check data again after multiple deduplications with new syntax';
SELECT 'r1', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value), uniqExact(insert_replica_id) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;
SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val;
SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val;

-- cleanup the mess
DROP TABLE replicated_deduplicate_by_columns_r1;
@ -0,0 +1,4 @@
['Hello','world','42" TV']
['Hello','world','42" TV']
['Hello','world','42" TV']
['Hello','world','42" TV']
27
tests/queries/0_stateless/01607_arrays_as_nested_csv.sh
Executable file
@ -0,0 +1,27 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh

${CLICKHOUSE_CLIENT} --multiquery --query "
DROP TABLE IF EXISTS test;
CREATE TABLE test (a Array(String)) ENGINE = Memory;
"

${CLICKHOUSE_CLIENT} --query "INSERT INTO test FORMAT CSV" <<END
"['Hello', 'world', '42"" TV']"
END

${CLICKHOUSE_CLIENT} --format_csv_allow_single_quotes 0 --query "INSERT INTO test FORMAT CSV" <<END
"'Hello', 'world', '42"" TV'"
END

${CLICKHOUSE_CLIENT} --input_format_csv_arrays_as_nested_csv 1 --query "INSERT INTO test FORMAT CSV" <<END
"[""Hello"", ""world"", ""42"""" TV""]"
"""Hello"", ""world"", ""42"""" TV"""
END

${CLICKHOUSE_CLIENT} --multiquery --query "
SELECT * FROM test;
DROP TABLE IF EXISTS test;
"
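The setting under test, input_format_csv_arrays_as_nested_csv, lets an Array(String) be written as a CSV field that itself contains CSV-quoted elements. A condensed sketch of the same round trip; the table name is illustrative:

#!/usr/bin/env bash
clickhouse-client --query "CREATE TABLE IF NOT EXISTS csv_array_demo (a Array(String)) ENGINE = Memory"
# With the setting enabled, the doubled quotes inside the field are parsed as nested CSV.
clickhouse-client --input_format_csv_arrays_as_nested_csv 1 \
    --query "INSERT INTO csv_array_demo FORMAT CSV" <<'END'
"[""Hello"", ""world""]"
END
clickhouse-client --query "SELECT * FROM csv_array_demo"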
25
tests/queries/0_stateless/01610_client_spawn_editor.sh
Executable file
@ -0,0 +1,25 @@
#!/usr/bin/expect -f

log_user 0
set timeout 5
match_max 100000

if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000}

set env(EDITOR) [file dirname [file normalize [info script]]]"/01610_client_spawn_editor_open.editor"

spawn clickhouse-client
expect ":) "

# Open EDITOR
send -- "\033E"
# Send return
send -- "\r"
expect {
"│ 1 │" { }
timeout { exit 1 }
}
expect ":) "

send -- ""
expect eof
3
tests/queries/0_stateless/01610_client_spawn_editor_open.editor
Executable file
@ -0,0 +1,3 @@
#!/usr/bin/env bash

echo "select 1" > "$1"
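The expect script above drives the new editor binding end to end: any executable assigned to EDITOR that writes a query into the file it receives as "$1" will do. A hedged manual equivalent, with a placeholder path:

#!/usr/bin/env bash
# Point EDITOR at a script like 01610_client_spawn_editor_open.editor, then at the clickhouse-client
# prompt press Esc followed by E (the "\033E" sequence the expect test sends) to open the editor.
EDITOR=/path/to/my_query_editor.sh clickhouse-client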
@ -0,0 +1 @@
1
Some files were not shown because too many files have changed in this diff.