Merge pull request #45819 from qoega/clickhouse-help
Commit: ebb1b990f2
@ -41,6 +41,8 @@ contents:
  dst: /usr/bin/clickhouse-library-bridge
- src: root/usr/bin/clickhouse-odbc-bridge
  dst: /usr/bin/clickhouse-odbc-bridge
- src: root/usr/bin/clickhouse-static-files-disk-uploader
  dst: /usr/bin/clickhouse-static-files-disk-uploader
- src: root/usr/share/bash-completion/completions
  dst: /usr/share/bash-completion/completions
# docs
@ -59,6 +59,8 @@ contents:
  dst: /usr/bin/clickhouse-report
- src: root/usr/bin/clickhouse-server
  dst: /usr/bin/clickhouse-server
- src: root/usr/bin/clickhouse-su
  dst: /usr/bin/clickhouse-su
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-server/AUTHORS
@ -638,6 +638,16 @@ public:
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif

[[ maybe_unused ]] static std::string getHelpHeader()
{
    return
        "Usage: clickhouse benchmark [options] < queries.txt\n"
        "Usage: clickhouse benchmark [options] --query \"query text\"\n"

        "clickhouse-benchmark connects to ClickHouse server,"
        " repeatedly sends specified queries and produces reports with query statistics.\n";
}

int mainEntryClickHouseBenchmark(int argc, char ** argv)
{
    using namespace DB;
@ -705,7 +715,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << argv[0] << " [options] < queries.txt\n";
        std::cout << getHelpHeader();
        std::cout << desc << "\n";
        return 1;
    }
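For context, a minimal usage sketch matching the help header above; the query text and the --iterations value are illustrative only, and a ClickHouse server is assumed to be reachable on the default local port:

    # Both invocation forms from getHelpHeader(); values are examples, not part of the diff.
    echo "SELECT sum(number) FROM numbers(1000000)" > queries.txt
    clickhouse benchmark --iterations 100 < queries.txt
    clickhouse benchmark --query "SELECT 1"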
@ -959,12 +959,47 @@ bool Client::processWithFuzzing(const String & full_query)
}


[[ maybe_unused ]] static std::string getHelpHeader()
{
    return
        "Usage: clickhouse client [initial table definition] [--query <query>]\n"
        "clickhouse-client is a client application that is used to connect to ClickHouse.\n"

        "It can run queries as command line tool if you pass queries as an argument or as interactive client."
        " Queries can run one at a time, or in a multiquery mode with --multiquery option."
        " To change settings you may use 'SET' statements and SETTINGS clause in queries or set it for a"
        " session with corresponding clickhouse-client arguments.\n"
        "'clickhouse client' command will try to connect to clickhouse-server running on the same server."
        " If you have credentials set up pass them with --user <username> --password <password>"
        " or with --ask-password argument that will open command prompt.\n\n"

        "This one will try to connect to tcp native port(9000) without encryption:\n"
        "  clickhouse client --host clickhouse.example.com --password mysecretpassword\n"
        "To connect to a secure endpoint just set --secure argument. If you have"
        " an altered port set it with --port <your port>.\n"
        "  clickhouse client --secure --host clickhouse.example.com --password mysecretpassword\n";
}


[[ maybe_unused ]] static std::string getHelpFooter()
{
    return
        "Note: If you have clickhouse installed on your system you can use 'clickhouse-client'"
        " invocation with a dash.\n\n"
        "Example printing current longest running query on a server:\n"
        "  clickhouse client --query 'SELECT * FROM system.processes ORDER BY elapsed LIMIT 1 FORMAT Vertical'\n"
        "Example creating table and inserting data:\n";
}


void Client::printHelpMessage(const OptionsDescription & options_description)
{
    std::cout << getHelpHeader() << "\n";
    std::cout << options_description.main_description.value() << "\n";
    std::cout << options_description.external_description.value() << "\n";
    std::cout << options_description.hosts_and_ports_description.value() << "\n";
    std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
    std::cout << getHelpFooter() << "\n";
}

@ -97,8 +97,9 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
        std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
        std::cout << "Usage: clickhouse compressor [options] < INPUT > OUTPUT" << std::endl;
        std::cout << "Alternative usage: clickhouse compressor [options] INPUT OUTPUT" << std::endl;

        std::cout << desc << std::endl;
        return 0;
    }
@ -72,10 +72,10 @@ void ClusterCopierApp::handleHelp(const std::string &, const std::string &)
    Poco::Util::HelpFormatter help_formatter(options());
    if (terminal_width)
        help_formatter.setWidth(terminal_width);
    help_formatter.setCommand(commandName());
    help_formatter.setCommand(commandName() == "clickhouse-copier" ? "clickhouse-copier" : commandName() + " copier");
    help_formatter.setHeader("Copies tables from one cluster to another");
    help_formatter.setUsage("--config-file <config-file> --task-path <task-path>");
    help_formatter.format(std::cerr);
    help_formatter.format(std::cout);

    stopOptionsProcessing();
}
@ -31,8 +31,7 @@ void DisksApp::printHelpMessage(ProgramOptionsDescription & command_option_descr
    help_description->add(command_option_description);

    std::cout << "ClickHouse disk management tool\n";
    std::cout << "Usage: ./clickhouse-disks [OPTION]\n";
    std::cout << "clickhouse-disks\n\n";
    std::cout << "usage clickhouse disks [OPTION]\n" << "clickhouse-disks\n\n";

    for (const auto & current_command : supported_commands)
        std::cout << command_descriptions[current_command]->command_name
@ -146,11 +146,11 @@ int mainEntryClickHouseExtractFromConfig(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cerr << "Preprocess config file and extract value of the given key." << std::endl
        std::cout << "Preprocess config file and extract value of the given key." << std::endl
            << std::endl;
        std::cerr << "Usage: clickhouse extract-from-config [options]" << std::endl
        std::cout << "Usage: clickhouse extract-from-config [options]" << std::endl
            << std::endl;
        std::cerr << options_desc << std::endl;
        std::cout << options_desc << std::endl;
        return 0;
    }

@ -74,7 +74,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << argv[0] << " [options] < query" << std::endl;
        std::cout << "Usage: clickhouse format [options] < query" << std::endl;
        std::cout << desc << std::endl;
        return 1;
    }
@ -1189,7 +1189,7 @@ try
{
    using namespace DB;

    po::options_description desc("Allowed options", getTerminalWidth());
    po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
    desc.add_options()
        ("help,h", "produce help message")
        ("skip-commits-without-parents", po::value<bool>()->default_value(true),
@ -1218,7 +1218,7 @@ try
    if (options.count("help"))
    {
        std::cout << documentation << '\n'
            << "Usage: " << argv[0] << '\n'
            << "Usage: clickhouse git-import\n"
            << desc << '\n'
            << "\nExample:\n"
            << "\nclickhouse git-import --skip-paths 'generated\\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/' --skip-commits-with-messages '^Merge branch '\n";
@ -220,8 +220,8 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
        ("log-path", po::value<std::string>()->default_value("var/log/clickhouse-server"), "where to create log directory")
        ("data-path", po::value<std::string>()->default_value("var/lib/clickhouse"), "directory for data")
        ("pid-path", po::value<std::string>()->default_value("var/run/clickhouse-server"), "directory for pid file")
        ("user", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user to create")
        ("group", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_GROUP), "clickhouse group to create")
        ("user", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
        ("group", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_GROUP), "clickhouse group")
        ("noninteractive,y", "run non-interactively")
        ("link", "create symlink to the binary instead of copying to binary-path")
    ;
@ -231,7 +231,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " install [options]", getuid() != 0) << '\n';
        std::cout << "Usage: " << formatWithSudo("clickhouse", getuid() != 0) << " install [options]\n";
        std::cout << desc << '\n';
        return 1;
    }
@ -592,6 +592,11 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            fs::permissions(data_file, fs::perms::owner_read, fs::perm_options::replace);
            fmt::print("Data path configuration override is saved to file {}.\n", data_file);
        }
        else
        {
            fmt::print("WARNING: Configuration of data paths already exists in {}."
                " If you want to apply new paths, remove {} and run install again.\n", data_file, data_file);
        }

        /// Logger.
        const std::string logger_file = config_d / "logger.xml";
@ -609,6 +614,11 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            fs::permissions(logger_file, fs::perms::owner_read, fs::perm_options::replace);
            fmt::print("Log path configuration override is saved to file {}.\n", logger_file);
        }
        else
        {
            fmt::print("WARNING: Configuration of logger paths already exists in {}."
                " If you want to apply new paths, remove {} and run install again.\n", logger_file, logger_file);
        }

        /// User directories.
        const std::string user_directories_file = config_d / "user-directories.xml";
@ -627,6 +637,11 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            fs::permissions(user_directories_file, fs::perms::owner_read, fs::perm_options::replace);
            fmt::print("User directory path configuration override is saved to file {}.\n", user_directories_file);
        }
        else
        {
            fmt::print("WARNING: Configuration of user directories paths already exists in {}."
                " If you want to apply new paths, remove {} and run install again.\n", user_directories_file, user_directories_file);
        }

        /// OpenSSL.
        const std::string openssl_file = config_d / "openssl.xml";
@ -647,6 +662,11 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            fs::permissions(openssl_file, fs::perms::owner_read, fs::perm_options::replace);
            fmt::print("OpenSSL path configuration override is saved to file {}.\n", openssl_file);
        }
        else
        {
            fmt::print("WARNING: Configuration of OpenSSL paths already exists in {}."
                " If you want to apply new paths, remove {} and run install again.\n", openssl_file, openssl_file);
        }
    }
}
else
@ -658,18 +678,47 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

        if (configuration->has("path"))
        {
            data_path = configuration->getString("path");
            fmt::print("{} has {} as data path.\n", main_config_file.string(), data_path.string());
            std::string config_data_path = configuration->getString("path");
            fmt::print("{} has {} as data path.\n", main_config_file.string(), config_data_path);
            if (options.count("data-path"))
            {
                fmt::print("WARNING: Will use {} instead of {} as data path as it is set in {}.\n",
                    config_data_path, data_path.string(), main_config_file.string());
                data_path = config_data_path;
            }
        }

        if (configuration->has("logger.log"))
        {
            log_path = fs::path(configuration->getString("logger.log")).remove_filename();
            fmt::print("{} has {} as log path.\n", main_config_file.string(), log_path.string());
            std::string config_log_path = fs::path(configuration->getString("logger.log")).remove_filename();
            fmt::print("{} has {} as log path.\n", main_config_file.string(), config_log_path);
            if (options.count("log-path"))
            {
                fmt::print("WARNING: Will use {} instead of {} as log path as it is set in {}.\n",
                    config_log_path, data_path.string(), main_config_file.string());
                log_path = config_log_path;
            }
        }

        if (configuration->has("user_directories.local_directory.path"))
        {
            std::string config_user_directory_path = fs::path(configuration->getString("user_directories.local_directory.path")).remove_filename();
            fmt::print("{} has {} as log path.\n", main_config_file.string(), config_user_directory_path);
            if (options.count("data-path"))
            {
                fmt::print("WARNING: Will use {} instead of {} as user directory path as it is set in {}.\n",
                    config_user_directory_path, (data_path / "access").string(), main_config_file.string());
            }
        }

        /// OpenSSL.
        const std::string openssl_file = config_d / "openssl.xml";
        if (options.count("data-path") && !fs::exists(openssl_file))
        {
            fmt::print("WARNING: Paths for OpenSSL are not configured automatically.\n");
        }
    }


    if (!fs::exists(users_config_file))
    {
        std::string_view users_config_content = getResource("users.xml");
@ -905,6 +954,33 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
        if (has_password_for_default_user)
            maybe_password = " --password";

        // If user specified --prefix, --pid-path, --config-path, --binary-path, --user, --group
        // in install args we need to pass them to start command

        std::string maybe_prefix;
        if (options.count("prefix"))
            maybe_prefix = " --prefix " + prefix.string();

        std::string maybe_pid_path;
        if (options.count("pid-path"))
            maybe_pid_path = " --pid-path " + options["pid-path"].as<std::string>();

        std::string maybe_config_path;
        if (options.count("config-path"))
            maybe_config_path = " --config-path " + options["config-path"].as<std::string>();

        std::string maybe_binary_path;
        if (options.count("binary-path"))
            maybe_binary_path = " --binary-path " + options["binary-path"].as<std::string>();

        std::string maybe_user;
        if (options.count("user"))
            maybe_user = " --user " + options["user"].as<std::string>();

        std::string maybe_group;
        if (options.count("group"))
            maybe_group = " --group " + options["group"].as<std::string>();

        fs::path pid_file = pid_path / "clickhouse-server.pid";
        if (fs::exists(pid_file))
        {
@ -913,9 +989,15 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                "\nRestart clickhouse-server with:\n"
                " {}\n"
                "\nStart clickhouse-client with:\n"
                " clickhouse-client{}\n\n",
                formatWithSudo("clickhouse restart"),
                maybe_password);
                " clickhouse-client{}{}{}{}{}{}{}\n\n",
                formatWithSudo("clickhouse restart", getuid() != 0),
                maybe_password,
                maybe_prefix,
                maybe_pid_path,
                maybe_config_path,
                maybe_binary_path,
                maybe_user,
                maybe_group);
        }
        else
        {
@ -924,9 +1006,15 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                "\nStart clickhouse-server with:\n"
                " {}\n"
                "\nStart clickhouse-client with:\n"
                " clickhouse-client{}\n\n",
                formatWithSudo("clickhouse start"),
                maybe_password);
                " clickhouse-client{}{}{}{}{}{}{}\n\n",
                formatWithSudo("clickhouse start", getuid() != 0),
                maybe_password,
                maybe_prefix,
                maybe_pid_path,
                maybe_config_path,
                maybe_binary_path,
                maybe_user,
                maybe_group);
        }
    }
    catch (const fs::filesystem_error &)
@ -947,10 +1035,9 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
    return 0;
}


namespace
{
    int start(const std::string & user, const fs::path & executable, const fs::path & config, const fs::path & pid_file, unsigned max_tries)
    int start(const std::string & user, const std::string & group, const fs::path & binary, const fs::path & executable, const fs::path & config, const fs::path & pid_file, unsigned max_tries, bool no_sudo)
    {
        if (fs::exists(pid_file))
        {
@ -990,9 +1077,18 @@ namespace

        if (!user.empty())
        {
            if (no_sudo)
            {
                /// Sometimes there is no sudo available like in some docker images
                /// We will use clickhouse su instead
                command = fmt::format("{} su {}:{} {}", binary.string(), user, group, command);
            }
            else
            {
                /// sudo respects limits in /etc/security/limits.conf e.g. open files,
                /// that's why we are using it instead of the 'clickhouse su' tool.
                command = fmt::format("sudo -u '{}' {}", user, command);
            }
        }

        fmt::print("Will run {}\n", command);
@ -1194,28 +1290,35 @@ int mainEntryClickHouseStart(int argc, char ** argv)
        ("binary-path", po::value<std::string>()->default_value("usr/bin"), "directory with binary")
        ("config-path", po::value<std::string>()->default_value("etc/clickhouse-server"), "directory with configs")
        ("pid-path", po::value<std::string>()->default_value("var/run/clickhouse-server"), "directory for pid file")
        ("user", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
        ("user", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
        ("group", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_GROUP), "clickhouse group")
        ("max-tries", po::value<unsigned>()->default_value(60), "Max number of tries for waiting the server (with 1 second delay)")
        ("no-sudo", po::bool_switch(), "Use clickhouse su if sudo is unavailable")
    ;

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, desc), options);

    bool no_sudo = options["no-sudo"].as<bool>();

    if (options.count("help"))
    {
        std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " start", getuid() != 0) << '\n';
        std::cout << "Usage: " << formatWithSudo("clickhouse", !no_sudo && (getuid() != 0)) << " start\n";
        std::cout << desc << '\n';
        return 1;
    }

    std::string user = options["user"].as<std::string>();
    std::string group = options["group"].as<std::string>();

    fs::path prefix = options["prefix"].as<std::string>();
    fs::path binary = prefix / options["binary-path"].as<std::string>() / "clickhouse";
    fs::path executable = prefix / options["binary-path"].as<std::string>() / "clickhouse-server";
    fs::path config = prefix / options["config-path"].as<std::string>() / "config.xml";
    fs::path pid_file = prefix / options["pid-path"].as<std::string>() / "clickhouse-server.pid";
    unsigned max_tries = options["max-tries"].as<unsigned>();

    return start(user, executable, config, pid_file, max_tries);
    return start(user, group, binary, executable, config, pid_file, max_tries, no_sudo);
}
catch (...)
{
@ -1244,7 +1347,8 @@ int mainEntryClickHouseStop(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " stop", getuid() != 0) << '\n';
        std::cout << "Usage: " << formatWithSudo("clickhouse", getuid() != 0) << " stop\n";
        std::cout << desc << '\n';
        return 1;
    }

@ -1280,7 +1384,8 @@ int mainEntryClickHouseStatus(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " status", getuid() != 0) << '\n';
        std::cout << "Usage: " << formatWithSudo("clickhouse", getuid() != 0) << " status\n";
        std::cout << desc << '\n';
        return 1;
    }

@ -1310,24 +1415,31 @@ int mainEntryClickHouseRestart(int argc, char ** argv)
        ("binary-path", po::value<std::string>()->default_value("usr/bin"), "directory with binary")
        ("config-path", po::value<std::string>()->default_value("etc/clickhouse-server"), "directory with configs")
        ("pid-path", po::value<std::string>()->default_value("var/run/clickhouse-server"), "directory for pid file")
        ("user", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
        ("user", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
        ("group", po::value<std::string>()->implicit_value("")->default_value(DEFAULT_CLICKHOUSE_SERVER_GROUP), "clickhouse group")
        ("force", po::value<bool>()->default_value(false), "Stop with KILL signal instead of TERM")
        ("do-not-kill", po::bool_switch(), "Do not send KILL even if TERM did not help")
        ("max-tries", po::value<unsigned>()->default_value(60), "Max number of tries for waiting the server (with 1 second delay)")
        ("no-sudo", po::bool_switch(), "Use clickhouse su if sudo is unavailable")
    ;

    po::variables_map options;
    po::store(po::parse_command_line(argc, argv, desc), options);

    bool no_sudo = options["no-sudo"].as<bool>();

    if (options.count("help"))
    {
        std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " restart", getuid() != 0) << '\n';
        std::cout << "Usage: " << formatWithSudo("clickhouse", !no_sudo && getuid() != 0) << " restart\n";
        std::cout << desc << '\n';
        return 1;
    }

    std::string user = options["user"].as<std::string>();
    std::string group = options["group"].as<std::string>();

    fs::path prefix = options["prefix"].as<std::string>();
    fs::path binary = prefix / options["binary-path"].as<std::string>() / "clickhouse";
    fs::path executable = prefix / options["binary-path"].as<std::string>() / "clickhouse-server";
    fs::path config = prefix / options["config-path"].as<std::string>() / "config.xml";
    fs::path pid_file = prefix / options["pid-path"].as<std::string>() / "clickhouse-server.pid";
@ -1338,7 +1450,7 @@ int mainEntryClickHouseRestart(int argc, char ** argv)

    if (int res = stop(pid_file, force, do_not_kill, max_tries))
        return res;
    return start(user, executable, config, pid_file, max_tries);
    return start(user, group, binary, executable, config, pid_file, max_tries, no_sudo);
}
catch (...)
{
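A short sketch of how the new start options combine; the user/group names are the defaults from the options above, and whether sudo or `clickhouse su` wraps the server command is decided by --no-sudo as in start():

    # Classic flow: start() wraps the server command in `sudo -u clickhouse ...`
    sudo clickhouse start
    # Sudo-less flow (e.g. minimal docker images): start() wraps it in `clickhouse su clickhouse:clickhouse ...`
    clickhouse start --no-sudo --user clickhouse --group clickhouse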
@ -32,7 +32,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)

    if (options.count("help"))
    {
        std::cout << "Usage: " << argv[0] << " --zookeeper-logs-dir /var/lib/zookeeper/data/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/data/version-2 --output-dir /var/lib/clickhouse/coordination/snapshots" << std::endl;
        std::cout << "Usage: clickhouse keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/data/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/data/version-2 --output-dir /var/lib/clickhouse/coordination/snapshots" << std::endl;
        std::cout << desc << std::endl;
        return 0;
    }
@ -213,7 +213,7 @@ int Keeper::run()
        Poco::Util::HelpFormatter help_formatter(Keeper::options());
        auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
            "positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
            commandName());
            commandName() == "clickhouse-keeper" ? "clickhouse-keeper" : commandName() + " keeper");
        help_formatter.setHeader(header_str);
        help_formatter.format(std::cout);
        return 0;
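The effect of the commandName() check above, also captured in the test reference output later in this diff, is a sketch like this (assuming both the combined binary and the symlink are installed):

    clickhouse keeper --help     # usage header: clickhouse keeper [OPTION] [-- [ARG]...]
    clickhouse-keeper --help     # usage header: clickhouse-keeper [OPTION] [-- [ARG]...]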
@ -699,27 +699,46 @@ void LocalServer::processConfig()
[[ maybe_unused ]] static std::string getHelpHeader()
{
    return
        "usage: clickhouse-local [initial table definition] [--query <query>]\n"
        "Usage: clickhouse local [initial table definition] [--query <query>]\n"

        "clickhouse-local allows to execute SQL queries on your data files via single command line call."
        " To do so, initially you need to define your data source and its format."
        " After you can execute your SQL queries in usual manner.\n"
        "clickhouse-local allows to execute SQL queries on your data files without running clickhouse-server.\n\n"

        "It can run as command line tool that does single action or as interactive client."
        " For interactive experience you can just run 'clickhouse local' or add --interactive argument to your command."
        " It will set up tables, run queries and pass control as if it is clickhouse-client."
        " Then you can execute your SQL queries in usual manner."
        " Non-interactive mode requires query as an argument and exits when queries finish."
        " Multiple SQL queries can be passed as --query argument.\n\n"

        "To configure initial environment two ways are supported: queries or command line parameters."

        "There are two ways to define initial table keeping your data."
        " Either just in first query like this:\n"
        "  CREATE TABLE <table> (<structure>) ENGINE = File(<input-format>, <file>);\n"
        "Either through corresponding command line parameters --table --structure --input-format and --file.";
        "Or through corresponding command line parameters --table --structure --input-format and --file.\n\n"

        "clickhouse-local supports all features and engines of ClickHouse."
        " You can query data from remote engines and store results locally or other way around."
        " For table engines that actually store data on a disk like Log and MergeTree"
        " clickhouse-local puts data to temporary directory that is not reused between runs.\n\n"
        "clickhouse-local can be used to query data from stopped clickhouse-server installation with --path to"
        " local directory with data.\n";
}


[[ maybe_unused ]] static std::string getHelpFooter()
{
    return
        "Note: If you have clickhouse installed on your system you can use 'clickhouse-local'"
        " invocation with a dash.\n\n"
        "Example printing memory used by each Unix user:\n"
        "ps aux | tail -n +2 | awk '{ printf(\"%s\\t%s\\n\", $1, $4) }' | "
        "clickhouse-local -S \"user String, mem Float64\" -q"
        " \"SELECT user, round(sum(mem), 2) as mem_total FROM table GROUP BY user ORDER"
        " BY mem_total DESC FORMAT PrettyCompact\"";
        " BY mem_total DESC FORMAT PrettyCompact\"\n\n"
        "Example reading file from S3, converting format and writing to a file:\n"
        "clickhouse-local --query \"SELECT c1 as version, c2 as date "
        "FROM url('https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv')"
        " INTO OUTFILE '/tmp/versions.json'\"";
}


@ -727,7 +746,7 @@ void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & o
{
#if defined(FUZZING_MODE)
    std::cout <<
        "usage: clickhouse <clickhouse-local arguments> -- <libfuzzer arguments>\n"
        "Usage: clickhouse <clickhouse-local arguments> -- <libfuzzer arguments>\n"
        "Note: It is important not to use only one letter keys with single dash for \n"
        "for clickhouse-local arguments. It may work incorrectly.\n"

@ -895,4 +914,5 @@ catch (...)
{
    return 1;
}

#endif
@ -79,17 +79,43 @@ int mainEntryClickHouseRestart(int argc, char ** argv);
int mainEntryClickHouseDisks(int argc, char ** argv);
#endif

int mainEntryClickHouseHashBinary(int, char **)
bool hasHelpArg (char* arg)
{
    /// Intentionally without newline. So you can run:
    /// objcopy --add-section .clickhouse.hash=<(./clickhouse hash-binary) clickhouse
    return (strcmp(arg, "--help") == 0 || (strcmp(arg, "-h") == 0) || (strcmp(arg, "help") == 0));
}

int mainEntryClickHouseHashBinary(int argc_, char ** argv_)
{
    std::vector<char *> argv(argv_, argv_ + argc_);
    auto it = std::find_if(argv.begin(), argv.end(), hasHelpArg);
    if (it != argv.end())
    {
        std::cout << "Usage: clickhouse hash\nPrints hash of clickhouse binary.\n";
        std::cout << " -h, --help Prints this message\n";
        std::cout << "Result is intentionally without newline. So you can run:\n";
        std::cout << "objcopy --add-section .clickhouse.hash=<(./clickhouse hash-binary) clickhouse.\n\n";
        std::cout << "Current binary hash: ";
    }
    std::cout << getHashOfLoadedBinaryHex();
    return 0;
}

namespace
{

void printHelp();

int mainEntryHelp(int, char **)
{
    printHelp();
    return 0;
}

int printHelpOnError(int, char **)
{
    printHelp();
    return -1;
}

using MainFunc = int (*)(int, char**);

#if !defined(FUZZING_MODE)
@ -150,14 +176,14 @@ std::pair<const char *, MainFunc> clickhouse_applications[] =
#if ENABLE_CLICKHOUSE_DISKS
    {"disks", mainEntryClickHouseDisks},
#endif
    {"help", mainEntryHelp},
};

int printHelp(int, char **)
void printHelp()
{
    std::cerr << "Use one of the following commands:" << std::endl;
    std::cout << "Use one of the following commands:" << std::endl;
    for (auto & application : clickhouse_applications)
        std::cerr << "clickhouse " << application.first << " [args] " << std::endl;
    return -1;
        std::cout << "clickhouse " << application.first << " [args] " << std::endl;
}

bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)
@ -467,7 +493,7 @@ int main(int argc_, char ** argv_)
    std::vector<char *> argv(argv_, argv_ + argc_);

    /// Print a basic help if nothing was matched
    MainFunc main_func = printHelp;
    MainFunc main_func = printHelpOnError;

    for (auto & application : clickhouse_applications)
    {
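A sketch of the resulting dispatch behavior, assuming a built clickhouse binary on PATH; the exact non-zero status on the error path depends on how the shell maps the negative return value of printHelpOnError():

    clickhouse help              # new `help` subcommand: prints the command list and exits with 0
    clickhouse --help            # prints the same listing (see the test reference below)
    clickhouse unknown-command   # nothing matches: printHelpOnError() prints the list and returns non-zero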
@ -1243,10 +1243,10 @@ try
        || !options.count("output-format"))
    {
        std::cout << documentation << "\n"
            << "\nUsage: " << argv[0] << " [options] < in > out\n"
            << "\nUsage: clickhouse obfuscator [options] < in > out\n"
            << "\nInput must be seekable file (it will be read twice).\n"
            << "\n" << description << "\n"
            << "\nExample:\n " << argv[0] << " --seed \"$(head -c16 /dev/urandom | base64)\" --input-format TSV --output-format TSV --structure 'CounterID UInt32, URLDomain String, URL String, SearchPhrase String, Title String' < stats.tsv\n";
            << "\nExample:\n clickhouse obfuscator --seed \"$(head -c16 /dev/urandom | base64)\" --input-format TSV --output-format TSV --structure 'CounterID UInt32, URLDomain String, URL String, SearchPhrase String, Title String' < stats.tsv\n";
        return 0;
    }

@ -463,7 +463,7 @@ int Server::run()
        Poco::Util::HelpFormatter help_formatter(Server::options());
        auto header_str = fmt::format("{} [OPTION] [-- [ARG]...]\n"
            "positional arguments can be used to rewrite config.xml properties, for example, --http_port=8010",
            commandName());
            commandName() == "clickhouse-server" ? "clickhouse-server" : commandName() + " server");
        help_formatter.setHeader(header_str);
        help_formatter.format(std::cout);
        return 0;
@ -145,7 +145,7 @@ try
    using namespace DB;
    namespace po = boost::program_options;

    po::options_description description("Allowed options", getTerminalWidth());
    po::options_description description = createOptionsDescription("Allowed options", getTerminalWidth());
    description.add_options()
        ("help,h", "produce help message")
        ("metadata-path", po::value<std::string>(), "Metadata path (SELECT data_paths FROM system.tables WHERE name = 'table_name' AND database = 'database_name')")
@ -107,7 +107,7 @@ try

    if (argc < 3)
    {
        std::cout << "Usage: ./clickhouse su user:group ..." << std::endl;
        std::cout << "Usage: clickhouse su user:group ..." << std::endl;
        exit(0); // NOLINT(concurrency-mt-unsafe)
    }

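For reference, the `clickhouse su` form that the install/start code above composes looks roughly like this; the paths are illustrative, and --config-file is the flag documented in the server help output later in this diff:

    # Run the server as the clickhouse user/group without sudo (argument layout per the usage line above):
    clickhouse su clickhouse:clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml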
@ -419,7 +419,7 @@ The server successfully detected this situation and will download merged part fr
    M(MainConfigLoads, "Number of times the main configuration was reloaded.") \
    \
    M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \
    M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \
    M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were initialized as two-level for aggregation.") \
    \
    M(MergeTreeMetadataCacheGet, "Number of rocksdb reads(used for merge tree metadata cache)") \
    M(MergeTreeMetadataCachePut, "Number of rocksdb puts(used for merge tree metadata cache)") \
@ -3868,6 +3868,7 @@ class ClickHouseInstance:
            while local_counter < retries:
                if not self.get_process_pid("clickhouse server"):
                    break
                self.exec_in_container(["bash", "-c", "ps aux"], user="root")
                time.sleep(0.5)
                local_counter += 1

@ -3928,6 +3929,7 @@ class ClickHouseInstance:
            while local_counter < retries:
                if not self.get_process_pid("clickhouse server"):
                    break
                self.exec_in_container(["bash", "-c", "ps aux"], user="root")
                time.sleep(0.5)
                local_counter += 1

@ -1 +1 @@
105
336
@ -1,5 +1,4 @@
#!/usr/bin/env bash
# Tags: no-fasttest

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@ -0,0 +1,514 @@
|
||||
================BINARY==========================
|
||||
|
||||
clickhouse --help
|
||||
|
||||
Use one of the following commands:
|
||||
clickhouse local [args]
|
||||
clickhouse client [args]
|
||||
clickhouse benchmark [args]
|
||||
clickhouse server [args]
|
||||
clickhouse extract-from-config [args]
|
||||
clickhouse compressor [args]
|
||||
clickhouse format [args]
|
||||
clickhouse copier [args]
|
||||
clickhouse obfuscator [args]
|
||||
clickhouse git-import [args]
|
||||
clickhouse keeper [args]
|
||||
clickhouse keeper-converter [args]
|
||||
clickhouse install [args]
|
||||
clickhouse start [args]
|
||||
clickhouse stop [args]
|
||||
clickhouse status [args]
|
||||
clickhouse restart [args]
|
||||
clickhouse static-files-disk-uploader [args]
|
||||
clickhouse su [args]
|
||||
clickhouse hash-binary [args]
|
||||
clickhouse disks [args]
|
||||
clickhouse help [args]
|
||||
|
||||
clickhouse help
|
||||
|
||||
Use one of the following commands:
|
||||
clickhouse local [args]
|
||||
clickhouse client [args]
|
||||
clickhouse benchmark [args]
|
||||
clickhouse server [args]
|
||||
clickhouse extract-from-config [args]
|
||||
clickhouse compressor [args]
|
||||
clickhouse format [args]
|
||||
clickhouse copier [args]
|
||||
clickhouse obfuscator [args]
|
||||
clickhouse git-import [args]
|
||||
clickhouse keeper [args]
|
||||
clickhouse keeper-converter [args]
|
||||
clickhouse install [args]
|
||||
clickhouse start [args]
|
||||
clickhouse stop [args]
|
||||
clickhouse status [args]
|
||||
clickhouse restart [args]
|
||||
clickhouse static-files-disk-uploader [args]
|
||||
clickhouse su [args]
|
||||
clickhouse hash-binary [args]
|
||||
clickhouse disks [args]
|
||||
clickhouse help [args]
|
||||
|
||||
clickhouse server
|
||||
|
||||
usage:
|
||||
clickhouse server [OPTION] [-- [ARG]...]
|
||||
positional arguments can be used to rewrite config.xml properties, for
|
||||
example, --http_port=8010
|
||||
|
||||
-h, --help show help and exit
|
||||
-V, --version show version and exit
|
||||
-C<file>, --config-file=<file> load configuration from a given file
|
||||
-L<file>, --log-file=<file> use given log file
|
||||
-E<file>, --errorlog-file=<file> use given log file for errors only
|
||||
-P<file>, --pid-file=<file> use given pidfile
|
||||
--daemon Run application as a daemon.
|
||||
--umask=mask Set the daemon's umask (octal, e.g. 027).
|
||||
--pidfile=path Write the process ID of the application to
|
||||
given file.
|
||||
|
||||
clickhouse copier
|
||||
|
||||
usage: clickhouse copier --config-file <config-file> --task-path <task-path>
|
||||
Copies tables from one cluster to another
|
||||
|
||||
-C<file>, --config-file=<file>
|
||||
load
|
||||
configuration
|
||||
from
|
||||
a
|
||||
given
|
||||
file
|
||||
-L<file>, --log-file=<file>
|
||||
use
|
||||
given
|
||||
log
|
||||
file
|
||||
-E<file>, --errorlog-file=<file>
|
||||
use
|
||||
given
|
||||
log
|
||||
file
|
||||
for
|
||||
errors
|
||||
only
|
||||
-P<file>, --pid-file=<file>
|
||||
use
|
||||
given
|
||||
pidfile
|
||||
--daemon
|
||||
Run
|
||||
application
|
||||
as
|
||||
a
|
||||
daemon.
|
||||
--umask=mask
|
||||
Set
|
||||
the
|
||||
daemon's
|
||||
umask
|
||||
(octal,
|
||||
e.g.
|
||||
027).
|
||||
--pidfile=path
|
||||
Write
|
||||
the
|
||||
process
|
||||
ID
|
||||
of
|
||||
the
|
||||
application
|
||||
to
|
||||
given
|
||||
file.
|
||||
--task-path=task-path
|
||||
path
|
||||
to
|
||||
task
|
||||
in
|
||||
ZooKeeper
|
||||
--task-file=task-file
|
||||
path
|
||||
to
|
||||
task
|
||||
file
|
||||
for
|
||||
uploading
|
||||
in
|
||||
ZooKeeper
|
||||
to
|
||||
task-path
|
||||
--task-upload-force=task-upload-force
|
||||
Force
|
||||
upload
|
||||
task-file
|
||||
even
|
||||
node
|
||||
already
|
||||
exists
|
||||
--safe-mode
|
||||
disables
|
||||
ALTER
|
||||
DROP
|
||||
PARTITION
|
||||
in
|
||||
case
|
||||
of
|
||||
errors
|
||||
--copy-fault-probability=copy-fault-probability
|
||||
the
|
||||
copying
|
||||
fails
|
||||
with
|
||||
specified
|
||||
probability
|
||||
(used
|
||||
to
|
||||
test
|
||||
partition
|
||||
state
|
||||
recovering)
|
||||
--move-fault-probability=move-fault-probability
|
||||
the
|
||||
moving
|
||||
fails
|
||||
with
|
||||
specified
|
||||
probability
|
||||
(used
|
||||
to
|
||||
test
|
||||
partition
|
||||
state
|
||||
recovering)
|
||||
--log-level=log-level
|
||||
sets
|
||||
log
|
||||
level
|
||||
--base-dir=base-dir
|
||||
base
|
||||
directory
|
||||
for
|
||||
copiers,
|
||||
consecutive
|
||||
copier
|
||||
launches
|
||||
will
|
||||
populate
|
||||
/base-dir/launch_id/*
|
||||
directories
|
||||
--experimental-use-sample-offset=experimental-use-sample-offset
|
||||
Use
|
||||
SAMPLE
|
||||
OFFSET
|
||||
query
|
||||
instead
|
||||
of
|
||||
cityHash64(PRIMARY
|
||||
KEY)
|
||||
%
|
||||
n
|
||||
==
|
||||
k
|
||||
--status
|
||||
Get
|
||||
for
|
||||
status
|
||||
for
|
||||
current
|
||||
execution
|
||||
--max-table-tries=max-table-tries
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
the
|
||||
copy
|
||||
table
|
||||
task
|
||||
--max-shard-partition-tries=max-shard-partition-tries
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
the
|
||||
copy
|
||||
one
|
||||
partition
|
||||
task
|
||||
--max-shard-partition-piece-tries-for-alter=max-shard-partition-piece-tries-for-alter
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
final
|
||||
ALTER
|
||||
ATTACH
|
||||
to
|
||||
destination
|
||||
table
|
||||
--retry-delay-ms=retry-delay-ms
|
||||
Delay
|
||||
between
|
||||
task
|
||||
retries
|
||||
--help
|
||||
produce
|
||||
this
|
||||
help
|
||||
message
|
||||
|
||||
clickhouse keeper
|
||||
|
||||
usage:
|
||||
clickhouse keeper [OPTION] [-- [ARG]...]
|
||||
positional arguments can be used to rewrite config.xml properties, for
|
||||
example, --http_port=8010
|
||||
|
||||
-h, --help show help and exit
|
||||
-V, --version show version and exit
|
||||
-force-recovery, --force-recovery Force recovery mode allowing Keeper to
|
||||
overwrite cluster configuration without
|
||||
quorum
|
||||
-C<file>, --config-file=<file> load configuration from a given file
|
||||
-L<file>, --log-file=<file> use given log file
|
||||
-E<file>, --errorlog-file=<file> use given log file for errors only
|
||||
-P<file>, --pid-file=<file> use given pidfile
|
||||
--daemon Run application as a daemon.
|
||||
--umask=mask Set the daemon's umask (octal, e.g. 027).
|
||||
--pidfile=path Write the process ID of the application to
|
||||
given file.
|
||||
================SYMLINK==============================
|
||||
|
||||
clickhouse-server
|
||||
|
||||
usage:
|
||||
clickhouse-server [OPTION] [-- [ARG]...]
|
||||
positional arguments can be used to rewrite config.xml properties, for
|
||||
example, --http_port=8010
|
||||
|
||||
-h, --help show help and exit
|
||||
-V, --version show version and exit
|
||||
-C<file>, --config-file=<file> load configuration from a given file
|
||||
-L<file>, --log-file=<file> use given log file
|
||||
-E<file>, --errorlog-file=<file> use given log file for errors only
|
||||
-P<file>, --pid-file=<file> use given pidfile
|
||||
--daemon Run application as a daemon.
|
||||
--umask=mask Set the daemon's umask (octal, e.g. 027).
|
||||
--pidfile=path Write the process ID of the application to
|
||||
given file.
|
||||
|
||||
clickhouse-copier
|
||||
|
||||
usage: clickhouse-copier --config-file <config-file> --task-path <task-path>
|
||||
Copies tables from one cluster to another
|
||||
|
||||
-C<file>, --config-file=<file>
|
||||
load
|
||||
configuration
|
||||
from
|
||||
a
|
||||
given
|
||||
file
|
||||
-L<file>, --log-file=<file>
|
||||
use
|
||||
given
|
||||
log
|
||||
file
|
||||
-E<file>, --errorlog-file=<file>
|
||||
use
|
||||
given
|
||||
log
|
||||
file
|
||||
for
|
||||
errors
|
||||
only
|
||||
-P<file>, --pid-file=<file>
|
||||
use
|
||||
given
|
||||
pidfile
|
||||
--daemon
|
||||
Run
|
||||
application
|
||||
as
|
||||
a
|
||||
daemon.
|
||||
--umask=mask
|
||||
Set
|
||||
the
|
||||
daemon's
|
||||
umask
|
||||
(octal,
|
||||
e.g.
|
||||
027).
|
||||
--pidfile=path
|
||||
Write
|
||||
the
|
||||
process
|
||||
ID
|
||||
of
|
||||
the
|
||||
application
|
||||
to
|
||||
given
|
||||
file.
|
||||
--task-path=task-path
|
||||
path
|
||||
to
|
||||
task
|
||||
in
|
||||
ZooKeeper
|
||||
--task-file=task-file
|
||||
path
|
||||
to
|
||||
task
|
||||
file
|
||||
for
|
||||
uploading
|
||||
in
|
||||
ZooKeeper
|
||||
to
|
||||
task-path
|
||||
--task-upload-force=task-upload-force
|
||||
Force
|
||||
upload
|
||||
task-file
|
||||
even
|
||||
node
|
||||
already
|
||||
exists
|
||||
--safe-mode
|
||||
disables
|
||||
ALTER
|
||||
DROP
|
||||
PARTITION
|
||||
in
|
||||
case
|
||||
of
|
||||
errors
|
||||
--copy-fault-probability=copy-fault-probability
|
||||
the
|
||||
copying
|
||||
fails
|
||||
with
|
||||
specified
|
||||
probability
|
||||
(used
|
||||
to
|
||||
test
|
||||
partition
|
||||
state
|
||||
recovering)
|
||||
--move-fault-probability=move-fault-probability
|
||||
the
|
||||
moving
|
||||
fails
|
||||
with
|
||||
specified
|
||||
probability
|
||||
(used
|
||||
to
|
||||
test
|
||||
partition
|
||||
state
|
||||
recovering)
|
||||
--log-level=log-level
|
||||
sets
|
||||
log
|
||||
level
|
||||
--base-dir=base-dir
|
||||
base
|
||||
directory
|
||||
for
|
||||
copiers,
|
||||
consecutive
|
||||
copier
|
||||
launches
|
||||
will
|
||||
populate
|
||||
/base-dir/launch_id/*
|
||||
directories
|
||||
--experimental-use-sample-offset=experimental-use-sample-offset
|
||||
Use
|
||||
SAMPLE
|
||||
OFFSET
|
||||
query
|
||||
instead
|
||||
of
|
||||
cityHash64(PRIMARY
|
||||
KEY)
|
||||
%
|
||||
n
|
||||
==
|
||||
k
|
||||
--status
|
||||
Get
|
||||
for
|
||||
status
|
||||
for
|
||||
current
|
||||
execution
|
||||
--max-table-tries=max-table-tries
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
the
|
||||
copy
|
||||
table
|
||||
task
|
||||
--max-shard-partition-tries=max-shard-partition-tries
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
the
|
||||
copy
|
||||
one
|
||||
partition
|
||||
task
|
||||
--max-shard-partition-piece-tries-for-alter=max-shard-partition-piece-tries-for-alter
|
||||
Number
|
||||
of
|
||||
tries
|
||||
for
|
||||
final
|
||||
ALTER
|
||||
ATTACH
|
||||
to
|
||||
destination
|
||||
table
|
||||
--retry-delay-ms=retry-delay-ms
|
||||
Delay
|
||||
between
|
||||
task
|
||||
retries
|
||||
--help
|
||||
produce
|
||||
this
|
||||
help
|
||||
message
|
||||
|
||||
clickhouse-keeper
|
||||
|
||||
usage:
|
||||
clickhouse-keeper [OPTION] [-- [ARG]...]
|
||||
positional arguments can be used to rewrite config.xml properties, for
|
||||
example, --http_port=8010
|
||||
|
||||
-h, --help show help and exit
|
||||
-V, --version show version and exit
|
||||
-force-recovery, --force-recovery Force recovery mode allowing Keeper to
|
||||
overwrite cluster configuration without
|
||||
quorum
|
||||
-C<file>, --config-file=<file> load configuration from a given file
|
||||
-L<file>, --log-file=<file> use given log file
|
||||
-E<file>, --errorlog-file=<file> use given log file for errors only
|
||||
-P<file>, --pid-file=<file> use given pidfile
|
||||
--daemon Run application as a daemon.
|
||||
--umask=mask Set the daemon's umask (octal, e.g. 027).
|
||||
--pidfile=path Write the process ID of the application to
|
||||
given file.
|
38  tests/queries/0_stateless/02598_clickhouse_help_help_formatter.sh  Executable file
@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Tags: no-parallel

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh


# We have to use fixed terminal width. It may break other tests results formatting.
# In CI there is no tty and we just ignore failed stty calls.
# Set 80 to have same as default size as in notty.
backup_stty_size=$(stty size 2>/dev/null | awk '{print $2}' ||:)
stty columns 78 2>/dev/null ||:

echo "================BINARY=========================="

echo -e "\nclickhouse --help\n"
$CLICKHOUSE_BINARY --help
echo -e "\nclickhouse help\n"
$CLICKHOUSE_BINARY help

echo -e "\nclickhouse server\n"
$CLICKHOUSE_BINARY server --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
echo -e "\nclickhouse copier\n"
$CLICKHOUSE_BINARY copier --help
echo -e "\nclickhouse keeper\n"
$CLICKHOUSE_BINARY keeper --help

echo "================SYMLINK=============================="

echo -e "\nclickhouse-server\n"
${CLICKHOUSE_BINARY}-server --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
echo -e "\nclickhouse-copier\n"
${CLICKHOUSE_BINARY}-copier --help
echo -e "\nclickhouse-keeper\n"
${CLICKHOUSE_BINARY}-keeper --help

stty columns $backup_stty_size 2>/dev/null ||:
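As a usage note, the expected output committed alongside a stateless test like this is typically refreshed by re-running the script and capturing stdout; the reference filename below follows the usual 0_stateless naming convention and is an assumption here, not taken from this diff:

    # Hypothetical refresh of the expected output for this test:
    tests/queries/0_stateless/02598_clickhouse_help_help_formatter.sh > tests/queries/0_stateless/02598_clickhouse_help_help_formatter.reference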
@ -0,0 +1,784 @@
|
||||
================BINARY==========================
|
||||
|
||||
clickhouse --help
|
||||
|
||||
Use one of the following commands:
|
||||
clickhouse local [args]
|
||||
clickhouse client [args]
|
||||
clickhouse benchmark [args]
|
||||
clickhouse server [args]
|
||||
clickhouse extract-from-config [args]
|
||||
clickhouse compressor [args]
|
||||
clickhouse format [args]
|
||||
clickhouse copier [args]
|
||||
clickhouse obfuscator [args]
|
||||
clickhouse git-import [args]
|
||||
clickhouse keeper [args]
|
||||
clickhouse keeper-converter [args]
|
||||
clickhouse install [args]
|
||||
clickhouse start [args]
|
||||
clickhouse stop [args]
|
||||
clickhouse status [args]
|
||||
clickhouse restart [args]
|
||||
clickhouse static-files-disk-uploader [args]
|
||||
clickhouse su [args]
|
||||
clickhouse hash-binary [args]
|
||||
clickhouse disks [args]
|
||||
clickhouse help [args]
|
||||
|
||||
clickhouse help
|
||||
|
||||
Use one of the following commands:
|
||||
clickhouse local [args]
|
||||
clickhouse client [args]
|
||||
clickhouse benchmark [args]
|
||||
clickhouse server [args]
|
||||
clickhouse extract-from-config [args]
|
||||
clickhouse compressor [args]
|
||||
clickhouse format [args]
|
||||
clickhouse copier [args]
|
||||
clickhouse obfuscator [args]
|
||||
clickhouse git-import [args]
|
||||
clickhouse keeper [args]
|
||||
clickhouse keeper-converter [args]
|
||||
clickhouse install [args]
|
||||
clickhouse start [args]
|
||||
clickhouse stop [args]
|
||||
clickhouse status [args]
|
||||
clickhouse restart [args]
|
||||
clickhouse static-files-disk-uploader [args]
|
||||
clickhouse su [args]
|
||||
clickhouse hash-binary [args]
|
||||
clickhouse disks [args]
|
||||
clickhouse help [args]
|
||||
|
||||
clickhouse benchmark
|
||||
|
||||
Usage: clickhouse benchmark [options] < queries.txt
|
||||
Usage: clickhouse benchmark [options] --query "query text"
|
||||
clickhouse-benchmark connects to ClickHouse server, repeatedly sends specified queries and produces reports query statistics.
|
||||
|
||||
clickhouse client
|
||||
|
||||
Usage: clickhouse client [initial table definition] [--query <query>]
|
||||
clickhouse-client is a client application that is used to connect to ClickHouse.
|
||||
It can run queries as command line tool if you pass queries as an argument or as interactive client. Queries can run one at a time, or in in a multiquery mode with --multiquery option. To change settings you may use 'SET' statements and SETTINGS clause in queries or set is for a session with corresponding clickhouse-client arguments.
|
||||
'clickhouse client' command will try connect to clickhouse-server running on the same server. If you have credentials set up pass them with --user <username> --password <password> or with --ask-password argument that will open command prompt.
|
||||
|
||||
This one will try connect to tcp native port(9000) without encryption:
|
||||
clickhouse client --host clickhouse.example.com --password mysecretpassword
|
||||
To connect to secure endpoint just set --secure argument. If you have artered port set it with --port <your port>.
|
||||
clickhouse client --secure --host clickhouse.example.com --password mysecretpassword
|
||||
|
||||
|
||||
clickhouse local
|
||||
|
||||
Usage: clickhouse local [initial table definition] [--query <query>]
|
||||
clickhouse-local allows to execute SQL queries on your data files without running clickhouse-server.
|
||||
|
||||
It can run as command line tool that does single action or as interactive client. For interactive experience you can just run 'clickhouse local' or add --interactive argument to your command. It will set up tables, run queries and pass control as if it is clickhouse-client. Then you can execute your SQL queries in usual manner. Non-interactive mode requires query as an argument and exits when queries finish. Multiple SQL queries can be passed as --query argument.
|
||||
|
||||
To configure initial environment two ways are supported: queries or command line parameters. Either just in first query like this:
|
||||
CREATE TABLE <table> (<structure>) ENGINE = File(<input-format>, <file>);
|
||||
Or through corresponding command line parameters --table --structure --input-format and --file.
|
||||
|
||||
clickhouse-local supports all features and engines of ClickHouse. You can query data from remote engines and store results locally or other way around. For table engines that actually store data on a disk like Log and MergeTree clickhouse-local puts data to temporary directory that is not reused between runs.
|
||||
|
||||
clickhouse-local can be used to query data from stopped clickhouse-server installation with --path to local directory with data.
|
||||
|
||||
Example reading file from S3, converting format and writing to a file:
|
||||
clickhouse-local --query "SELECT c1 as version, c2 as date FROM url('https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv') INTO OUTFILE '/tmp/versions.json'"
|
||||
|
||||
clickhouse compressor
|
||||
|
||||
Usage: clickhouse compressor [options] < INPUT > OUTPUT
|
||||
Alternative usage: clickhouse compressor [options] INPUT OUTPUT
|
||||
|
||||
clickhouse disks
|
||||
|
||||
ClickHouse disk management tool
|
||||
usage clickhouse disks [OPTION]
|
||||
clickhouse-disks
|
||||
|
||||
read read File `from_path` to `to_path` or to stdout
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
write Write File `from_path` or stdin to `to_path`
|
||||
|
||||
link Create hardlink from `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
mkdir Create directory or directories recursively
|
||||
|
||||
remove Remove file or directory with all children. Throws exception if file doesn't exists.
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
move Move file or directory from `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
copy Recursively copy data containing at `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
list List files (the default disk is used by default)
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
list-disks List disks names
|
||||
|
||||
clickhouse-disks:
|
||||
-h [ --help ] Print common help message
|
||||
-C [ --config-file ] arg
|
||||
Set config file
|
||||
--disk arg Set disk name
|
||||
--command_name arg
|
||||
Name for command to do
|
||||
--save-logs Save logs to a file
|
||||
--log-level arg Logging level
|
||||
|
||||
|
||||
clickhouse extract
|
||||
|
||||
Preprocess config file and extract value of the given key.
|
||||
|
||||
Usage: clickhouse extract-from-config [options]
|
||||
|
||||
Allowed options:
|
||||
--help produce this help message
|
||||
--stacktrace print stack traces of exceptions
|
||||
--process-zk-includes if there are from_zk elements in config, connect to
|
||||
ZooKeeper and process them
|
||||
--try Do not warn about missing keys
|
||||
--log-level arg (=error) log level
|
||||
-c [ --config-file ] arg path to config file
|
||||
-k [ --key ] arg key to get value for
|
||||
|
||||
|
||||
clickhouse format
|
||||
|
||||
Usage: clickhouse format [options] < query
|
||||
|
||||
clickhouse git-import
|
||||
|
||||
|
||||
A tool to extract information from Git repository for analytics.
|
||||
|
||||
It dumps the data for the following tables:
|
||||
- commits - commits with statistics;
|
||||
- file_changes - files changed in every commit with the info about the change and statistics;
|
||||
- line_changes - every changed line in every changed file in every commit with full info about the line and the information about previous change of this line.
|
||||
|
||||
The largest and the most important table is "line_changes".
|
||||
|
||||
Allows to answer questions like:
|
||||
- list files with maximum number of authors;
|
||||
- show me the oldest lines of code in the repository;
|
||||
- show me the files with longest history;
|
||||
- list favorite files for author;
|
||||
- list largest files with lowest number of authors;
|
||||
- at what weekday the code has highest chance to stay in repository;
|
||||
- the distribution of code age across repository;
|
||||
- files sorted by average code age;
|
||||
- quickly show file with blame info (rough);
|
||||
- commits and lines of code distribution by time; by weekday, by author; for specific subdirectories;
|
||||
- show history for every subdirectory, file, line of file, the number of changes (lines and commits) across time; how the number of contributors was changed across time;
|
||||
- list files with most modifications;
|
||||
- list files that were rewritten the most times or by the most authors;
|
||||
- what is percentage of code removal by other authors, across authors;
|
||||
- the matrix of authors that shows which authors tend to rewrite other authors' code;
|
||||
- what is the worst time to write code, in the sense that the code has the highest chance to be rewritten;
|
||||
- the average time before code will be rewritten and the median (half-life of code decay);
|
||||
- comments/code percentage change in time / by author / by location;
|
||||
- who tends to write more tests / cpp code / comments.
|
||||
|
||||
The data is intended for analytical purposes. It can be imprecise for many reasons, but it should be good enough for its purpose.
|
||||
|
||||
The data is not intended to provide any conclusions for managers; it is especially contraindicated for any kind of "performance review". Instead, you can spend multiple days looking at various interesting statistics.
|
||||
|
||||
Run this tool inside your git repository. It will create .tsv files that can be loaded into ClickHouse (or into other DBMS if you dare).
|
||||
|
||||
The tool can process large enough repositories in a reasonable time.
|
||||
It has been tested on:
|
||||
- ClickHouse: 31 seconds; 3 million rows;
|
||||
- LLVM: 8 minutes; 62 million rows;
|
||||
- Linux: 12 minutes; 85 million rows;
|
||||
- Chromium: 67 minutes; 343 million rows;
|
||||
(the numbers as of Sep 2020)
|
||||
|
||||
|
||||
Prepare the database by executing the following queries:
|
||||
|
||||
DROP DATABASE IF EXISTS git;
|
||||
CREATE DATABASE git;
|
||||
|
||||
CREATE TABLE git.commits
|
||||
(
|
||||
hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
message String,
|
||||
files_added UInt32,
|
||||
files_deleted UInt32,
|
||||
files_renamed UInt32,
|
||||
files_modified UInt32,
|
||||
lines_added UInt32,
|
||||
lines_deleted UInt32,
|
||||
hunks_added UInt32,
|
||||
hunks_removed UInt32,
|
||||
hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
CREATE TABLE git.file_changes
|
||||
(
|
||||
change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6),
|
||||
path LowCardinality(String),
|
||||
old_path LowCardinality(String),
|
||||
file_extension LowCardinality(String),
|
||||
lines_added UInt32,
|
||||
lines_deleted UInt32,
|
||||
hunks_added UInt32,
|
||||
hunks_removed UInt32,
|
||||
hunks_changed UInt32,
|
||||
|
||||
commit_hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
commit_message String,
|
||||
commit_files_added UInt32,
|
||||
commit_files_deleted UInt32,
|
||||
commit_files_renamed UInt32,
|
||||
commit_files_modified UInt32,
|
||||
commit_lines_added UInt32,
|
||||
commit_lines_deleted UInt32,
|
||||
commit_hunks_added UInt32,
|
||||
commit_hunks_removed UInt32,
|
||||
commit_hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
CREATE TABLE git.line_changes
|
||||
(
|
||||
sign Int8,
|
||||
line_number_old UInt32,
|
||||
line_number_new UInt32,
|
||||
hunk_num UInt32,
|
||||
hunk_start_line_number_old UInt32,
|
||||
hunk_start_line_number_new UInt32,
|
||||
hunk_lines_added UInt32,
|
||||
hunk_lines_deleted UInt32,
|
||||
hunk_context LowCardinality(String),
|
||||
line LowCardinality(String),
|
||||
indent UInt8,
|
||||
line_type Enum('Empty' = 0, 'Comment' = 1, 'Punct' = 2, 'Code' = 3),
|
||||
|
||||
prev_commit_hash String,
|
||||
prev_author LowCardinality(String),
|
||||
prev_time DateTime,
|
||||
|
||||
file_change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6),
|
||||
path LowCardinality(String),
|
||||
old_path LowCardinality(String),
|
||||
file_extension LowCardinality(String),
|
||||
file_lines_added UInt32,
|
||||
file_lines_deleted UInt32,
|
||||
file_hunks_added UInt32,
|
||||
file_hunks_removed UInt32,
|
||||
file_hunks_changed UInt32,
|
||||
|
||||
commit_hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
commit_message String,
|
||||
commit_files_added UInt32,
|
||||
commit_files_deleted UInt32,
|
||||
commit_files_renamed UInt32,
|
||||
commit_files_modified UInt32,
|
||||
commit_lines_added UInt32,
|
||||
commit_lines_deleted UInt32,
|
||||
commit_hunks_added UInt32,
|
||||
commit_hunks_removed UInt32,
|
||||
commit_hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
Run the tool.
|
||||
|
||||
Then insert the data with the following commands:
|
||||
|
||||
clickhouse-client --query "INSERT INTO git.commits FORMAT TSV" < commits.tsv
|
||||
clickhouse-client --query "INSERT INTO git.file_changes FORMAT TSV" < file_changes.tsv
|
||||
clickhouse-client --query "INSERT INTO git.line_changes FORMAT TSV" < line_changes.tsv
|
||||
|
||||
|
||||
Usage: clickhouse git-import
|
||||
clickhouse git-import --skip-paths 'generated\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/' --skip-commits-with-messages '^Merge branch '
|
||||
|
||||
clickhouse install
|
||||
|
||||
Usage: clickhouse install [options]
|
||||
-h [ --help ] produce help message
|
||||
--prefix arg (=/) prefix for all paths
|
||||
--binary-path arg (=usr/bin) where to install binaries
|
||||
--config-path arg (=etc/clickhouse-server)
|
||||
where to install configs
|
||||
--log-path arg (=var/log/clickhouse-server)
|
||||
where to create log directory
|
||||
--data-path arg (=var/lib/clickhouse) directory for data
|
||||
--pid-path arg (=var/run/clickhouse-server)
|
||||
directory for pid file
|
||||
--user arg (=clickhouse) clickhouse user
|
||||
--group arg (=clickhouse) clickhouse group
|
||||
-y [ --noninteractive ] run non-interactively
|
||||
--link create symlink to the binary instead of
|
||||
copying to binary-path
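For example, a non-interactive installation under a custom prefix might look like this (a sketch; the prefix is illustrative):
 sudo clickhouse install --prefix /opt/clickhouse -y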
|
||||
|
||||
|
||||
clickhouse keeper-converter
|
||||
|
||||
Usage: clickhouse keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/data/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/data/version-2 --output-dir /var/lib/clickhouse/coordination/snapshots
|
||||
Allowed options:
|
||||
-h [ --help ] produce help message
|
||||
--zookeeper-logs-dir arg
|
||||
Path to directory with ZooKeeper logs
|
||||
--zookeeper-snapshots-dir arg
|
||||
Path to directory with ZooKeeper
|
||||
snapshots
|
||||
--output-dir arg Directory to place output
|
||||
clickhouse-keeper snapshot
|
||||
|
||||
|
||||
clickhouse obfuscator
|
||||
|
||||
|
||||
Simple tool for table data obfuscation.
|
||||
|
||||
It reads an input table and produces an output table that retains some properties of the input but contains different data.
|
||||
It allows publishing almost-real production data for use in benchmarks.
|
||||
|
||||
It is designed to retain the following properties of data:
|
||||
- cardinalities of values (number of distinct values) for every column and for every tuple of columns;
|
||||
- conditional cardinalities: number of distinct values of one column under condition on value of another column;
|
||||
- probability distributions of absolute value of integers; sign of signed integers; exponent and sign for floats;
|
||||
- probability distributions of length of strings;
|
||||
- probability of zero values of numbers; empty strings and arrays, NULLs;
|
||||
- data compression ratio when compressed with LZ77 and entropy family of codecs;
|
||||
- continuity (magnitude of difference) of time values across table; continuity of floating point values.
|
||||
- date component of DateTime values;
|
||||
- UTF-8 validity of string values;
|
||||
- string values continue to look somewhat natural.
|
||||
|
||||
Most of the properties above are viable for performance testing:
|
||||
- reading data, filtering, aggregation and sorting will work at almost the same speed
|
||||
as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
|
||||
|
||||
It works in a deterministic fashion: you define a seed value, and the transform is fully determined by the input data and the seed.
|
||||
Some transforms are one-to-one and could be reversed, so you need a large enough seed and must keep it secret.
|
||||
|
||||
It uses some cryptographic primitives to transform data, but from the cryptographic point of view,
|
||||
it doesn't do anything properly, and you should never consider the result secure unless you have other reasons for it.
|
||||
|
||||
It may retain some data you don't want to publish.
|
||||
|
||||
It always leaves the numbers 0, 1 and -1 as is. It also leaves dates, lengths of arrays and null flags exactly as in the source data.
|
||||
For example, if you have a column IsMobile in your table with values 0 and 1, it will have the same values in the transformed data.
|
||||
So the user will be able to count the exact ratio of mobile traffic.
|
||||
|
||||
Another example: suppose you have some private data in your table, such as user emails, and you don't want to publish any single email address.
|
||||
If your table is large enough, contains many different emails, and no email has a much higher frequency than all the others,
|
||||
it will anonymize all the data well. But if a column has only a small number of different values, it can possibly reproduce some of them.
|
||||
In that case you should take care, look at the exact algorithm of how this tool works, and probably fine-tune some of its command line parameters.
|
||||
|
||||
This tool works well only with a reasonable amount of data (at least thousands of rows).
|
||||
|
||||
|
||||
Usage: clickhouse obfuscator [options] < in > out
|
||||
|
||||
Input must be a seekable file (it will be read twice).
|
||||
|
||||
Example:
|
||||
clickhouse obfuscator --seed "$(head -c16 /dev/urandom | base64)" --input-format TSV --output-format TSV --structure 'CounterID UInt32, URLDomain String, URL String, SearchPhrase String, Title String' < stats.tsv
|
||||
|
||||
clickhouse static
|
||||
|
||||
|
||||
clickhouse start
|
||||
|
||||
Usage: clickhouse start
|
||||
-h [ --help ] produce help message
|
||||
--prefix arg (=/) prefix for all paths
|
||||
--binary-path arg (=usr/bin) directory with binary
|
||||
--config-path arg (=etc/clickhouse-server)
|
||||
directory with configs
|
||||
--pid-path arg (=var/run/clickhouse-server)
|
||||
directory for pid file
|
||||
--user arg (=clickhouse) clickhouse user
|
||||
--group arg (=clickhouse) clickhouse group
|
||||
--max-tries arg (=60) Max number of tries when waiting for the
|
||||
server (with a 1 second delay)
|
||||
--no-sudo Use clickhouse su if sudo is
|
||||
unavailable
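A typical lifecycle using these commands could look like this (a sketch assuming default paths and the clickhouse user):
 sudo clickhouse start
 sudo clickhouse status
 sudo clickhouse stop --max-tries 120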
|
||||
|
||||
|
||||
clickhouse stop
|
||||
|
||||
Usage: clickhouse stop
|
||||
-h [ --help ] produce help message
|
||||
--prefix arg (=/) prefix for all paths
|
||||
--pid-path arg (=var/run/clickhouse-server)
|
||||
directory for pid file
|
||||
--force Stop with KILL signal instead of TERM
|
||||
--do-not-kill Do not send KILL even if TERM did not
|
||||
help
|
||||
--max-tries arg (=60) Max number of tries when waiting for the
|
||||
server to finish after sending TERM
|
||||
(with a 1 second delay)
|
||||
|
||||
|
||||
clickhouse status
|
||||
|
||||
Usage: clickhouse status
|
||||
-h [ --help ] produce help message
|
||||
--prefix arg (=/) prefix for all paths
|
||||
--pid-path arg (=var/run/clickhouse-server)
|
||||
directory for pid file
|
||||
|
||||
|
||||
clickhouse restart
|
||||
|
||||
Usage: clickhouse restart
|
||||
-h [ --help ] produce help message
|
||||
--prefix arg (=/) prefix for all paths
|
||||
--binary-path arg (=usr/bin) directory with binary
|
||||
--config-path arg (=etc/clickhouse-server)
|
||||
directory with configs
|
||||
--pid-path arg (=var/run/clickhouse-server)
|
||||
directory for pid file
|
||||
--user arg (=clickhouse) clickhouse user
|
||||
--group arg (=clickhouse) clickhouse group
|
||||
--force arg (=0) Stop with KILL signal instead of TERM
|
||||
--do-not-kill Do not send KILL even if TERM did not
|
||||
help
|
||||
--max-tries arg (=60) Max number of tries when waiting for the
|
||||
server (with a 1 second delay)
|
||||
--no-sudo Use clickhouse su if sudo is
|
||||
unavailable
|
||||
|
||||
|
||||
clickhouse su
|
||||
|
||||
Usage: clickhouse su user:group ...
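For example, to run the server binary under the clickhouse user and group (a sketch; the paths are illustrative):
 clickhouse su clickhouse:clickhouse /usr/bin/clickhouse-server --config-file /etc/clickhouse-server/config.xml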
|
||||
|
||||
clickhouse hash
|
||||
|
||||
Usage: clickhouse hash
|
||||
Prints hash of clickhouse binary.
|
||||
-h, --help Prints this message
|
||||
The result is intentionally printed without a newline, so you can run:
|
||||
objcopy --add-section .clickhouse.hash=<(./clickhouse hash-binary) clickhouse.
|
||||
|
||||
================SYMLINK==============================
|
||||
|
||||
clickhouse-local
|
||||
|
||||
Usage: clickhouse local [initial table definition] [--query <query>]
|
||||
clickhouse-local allows executing SQL queries on your data files without running clickhouse-server.
|
||||
|
||||
It can run as a command line tool that does a single action, or as an interactive client. For an interactive experience you can just run 'clickhouse local' or add the --interactive argument to your command. It will set up tables, run queries and pass control as if it were clickhouse-client, and you can then execute SQL queries in the usual manner. Non-interactive mode requires a query as an argument and exits when the queries finish. Multiple SQL queries can be passed as the --query argument.
|
||||
|
||||
Two ways are supported to configure the initial environment: queries or command line parameters. Either just in the first query, like this:
|
||||
CREATE TABLE <table> (<structure>) ENGINE = File(<input-format>, <file>);
|
||||
Or through the corresponding command line parameters --table, --structure, --input-format and --file.
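For example, the same setup expressed entirely on the command line (a sketch; it assumes the implicitly created table keeps the default name 'table'):
 echo -e '1\t2\n3\t4' | clickhouse local --structure 'a UInt32, b UInt32' --input-format TSV --query 'SELECT a + b FROM table'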
|
||||
|
||||
clickhouse-local supports all features and engines of ClickHouse. You can query data from remote engines and store the results locally, or the other way around. For table engines that actually store data on disk, like Log and MergeTree, clickhouse-local puts the data into a temporary directory that is not reused between runs.
|
||||
|
||||
clickhouse-local can also be used to query data from a stopped clickhouse-server installation by passing --path with the local data directory.
|
||||
|
||||
Example reading a file from a URL, converting the format and writing to a file:
|
||||
clickhouse-local --query "SELECT c1 as version, c2 as date FROM url('https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv') INTO OUTFILE '/tmp/versions.json'"
|
||||
|
||||
clickhouse-client
|
||||
|
||||
Usage: clickhouse client [initial table definition] [--query <query>]
|
||||
clickhouse-client is a client application that is used to connect to ClickHouse.
|
||||
It can run queries as a command line tool, if you pass queries as an argument, or as an interactive client. Queries can run one at a time, or in multiquery mode with the --multiquery option. To change settings you may use 'SET' statements and the SETTINGS clause in queries, or set them for a session with the corresponding clickhouse-client arguments.
|
||||
The 'clickhouse client' command will try to connect to a clickhouse-server running on the same host. If you have credentials set up, pass them with --user <username> --password <password>, or with the --ask-password argument, which will open a password prompt.
|
||||
|
||||
This one will try to connect to the native TCP port (9000) without encryption:
|
||||
clickhouse client --host clickhouse.example.com --password mysecretpassword
|
||||
To connect to a secure endpoint just set the --secure argument. If you have an altered port, set it with --port <your port>.
|
||||
clickhouse client --secure --host clickhouse.example.com --password mysecretpassword
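Several statements can also be sent in one call with --multiquery (a sketch; the temporary table is illustrative):
 clickhouse client --multiquery --query "CREATE TEMPORARY TABLE t (x UInt8); INSERT INTO t VALUES (1),(2); SELECT sum(x) FROM t"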
|
||||
|
||||
|
||||
clickhouse-benchmark
|
||||
|
||||
Usage: clickhouse benchmark [options] < queries.txt
|
||||
Usage: clickhouse benchmark [options] --query "query text"
|
||||
clickhouse-benchmark connects to a ClickHouse server, repeatedly sends the specified queries and produces reports with query statistics.
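For example, running one query repeatedly over several concurrent connections (a sketch; the --concurrency and --iterations options are assumed to be available in this build):
 clickhouse benchmark --concurrency 4 --iterations 100 --query "SELECT count() FROM numbers(10000000)"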
|
||||
|
||||
clickhouse-extract
|
||||
|
||||
Preprocess the config file and extract the value of the given key.
|
||||
|
||||
Usage: clickhouse extract-from-config [options]
|
||||
|
||||
|
||||
clickhouse-compressor
|
||||
|
||||
Usage: clickhouse compressor [options] < INPUT > OUTPUT
|
||||
Alternative usage: clickhouse compressor [options] INPUT OUTPUT
|
||||
|
||||
clickhouse-format
|
||||
|
||||
Usage: clickhouse format [options] < query
|
||||
|
||||
clickhouse-obfuscator
|
||||
|
||||
|
||||
Simple tool for table data obfuscation.
|
||||
|
||||
It reads an input table and produces an output table that retains some properties of the input but contains different data.
|
||||
It allows publishing almost-real production data for use in benchmarks.
|
||||
|
||||
It is designed to retain the following properties of data:
|
||||
- cardinalities of values (number of distinct values) for every column and for every tuple of columns;
|
||||
- conditional cardinalities: number of distinct values of one column under condition on value of another column;
|
||||
- probability distributions of absolute value of integers; sign of signed integers; exponent and sign for floats;
|
||||
- probability distributions of length of strings;
|
||||
- probability of zero values of numbers; empty strings and arrays, NULLs;
|
||||
- data compression ratio when compressed with LZ77 and entropy family of codecs;
|
||||
- continuity (magnitude of difference) of time values across table; continuity of floating point values.
|
||||
- date component of DateTime values;
|
||||
- UTF-8 validity of string values;
|
||||
- string values continue to look somewhat natural.
|
||||
|
||||
Most of the properties above are viable for performance testing:
|
||||
- reading data, filtering, aggregation and sorting will work at almost the same speed
|
||||
as on original data due to saved cardinalities, magnitudes, compression ratios, etc.
|
||||
|
||||
It works in a deterministic fashion: you define a seed value, and the transform is fully determined by the input data and the seed.
|
||||
Some transforms are one-to-one and could be reversed, so you need a large enough seed and must keep it secret.
|
||||
|
||||
It uses some cryptographic primitives to transform data, but from the cryptographic point of view,
|
||||
it doesn't do anything properly, and you should never consider the result secure unless you have other reasons for it.
|
||||
|
||||
It may retain some data you don't want to publish.
|
||||
|
||||
It always leaves the numbers 0, 1 and -1 as is. It also leaves dates, lengths of arrays and null flags exactly as in the source data.
|
||||
For example, if you have a column IsMobile in your table with values 0 and 1, it will have the same values in the transformed data.
|
||||
So the user will be able to count the exact ratio of mobile traffic.
|
||||
|
||||
Another example: suppose you have some private data in your table, such as user emails, and you don't want to publish any single email address.
|
||||
If your table is large enough, contains many different emails, and no email has a much higher frequency than all the others,
|
||||
it will anonymize all the data well. But if a column has only a small number of different values, it can possibly reproduce some of them.
|
||||
In that case you should take care, look at the exact algorithm of how this tool works, and probably fine-tune some of its command line parameters.
|
||||
|
||||
This tool works well only with a reasonable amount of data (at least thousands of rows).
|
||||
|
||||
|
||||
Usage: clickhouse obfuscator [options] < in > out
|
||||
|
||||
Input must be a seekable file (it will be read twice).
|
||||
|
||||
Example:
|
||||
clickhouse obfuscator --seed "$(head -c16 /dev/urandom | base64)" --input-format TSV --output-format TSV --structure 'CounterID UInt32, URLDomain String, URL String, SearchPhrase String, Title String' < stats.tsv
|
||||
|
||||
clickhouse-git-import
|
||||
|
||||
|
||||
A tool to extract information from a Git repository for analytics.
|
||||
|
||||
It dumps the data for the following tables:
|
||||
- commits - commits with statistics;
|
||||
- file_changes - files changed in every commit with the info about the change and statistics;
|
||||
- line_changes - every changed line in every changed file in every commit with full info about the line and the information about previous change of this line.
|
||||
|
||||
The largest and the most important table is "line_changes".
|
||||
|
||||
It allows answering questions like:
|
||||
- list files with maximum number of authors;
|
||||
- show me the oldest lines of code in the repository;
|
||||
- show me the files with longest history;
|
||||
- list favorite files for author;
|
||||
- list largest files with lowest number of authors;
|
||||
- at what weekday the code has highest chance to stay in repository;
|
||||
- the distribution of code age across repository;
|
||||
- files sorted by average code age;
|
||||
- quickly show file with blame info (rough);
|
||||
- commits and lines of code distribution by time; by weekday, by author; for specific subdirectories;
|
||||
- show history for every subdirectory, file, line of file, the number of changes (lines and commits) across time; how the number of contributors was changed across time;
|
||||
- list files with most modifications;
|
||||
- list files that were rewritten the most times or by the most authors;
|
||||
- what is percentage of code removal by other authors, across authors;
|
||||
- the matrix of authors that shows which authors tend to rewrite other authors' code;
|
||||
- what is the worst time to write code, in the sense that the code has the highest chance to be rewritten;
|
||||
- the average time before code will be rewritten and the median (half-life of code decay);
|
||||
- comments/code percentage change in time / by author / by location;
|
||||
- who tends to write more tests / cpp code / comments.
|
||||
|
||||
The data is intended for analytical purposes. It can be imprecise for many reasons, but it should be good enough for its purpose.
|
||||
|
||||
The data is not intended to provide any conclusions for managers; it is especially contraindicated for any kind of "performance review". Instead, you can spend multiple days looking at various interesting statistics.
|
||||
|
||||
Run this tool inside your git repository. It will create .tsv files that can be loaded into ClickHouse (or into other DBMS if you dare).
|
||||
|
||||
The tool can process large enough repositories in a reasonable time.
|
||||
It has been tested on:
|
||||
- ClickHouse: 31 seconds; 3 million rows;
|
||||
- LLVM: 8 minutes; 62 million rows;
|
||||
- Linux: 12 minutes; 85 million rows;
|
||||
- Chromium: 67 minutes; 343 million rows;
|
||||
(the numbers as of Sep 2020)
|
||||
|
||||
|
||||
Prepare the database by executing the following queries:
|
||||
|
||||
DROP DATABASE IF EXISTS git;
|
||||
CREATE DATABASE git;
|
||||
|
||||
CREATE TABLE git.commits
|
||||
(
|
||||
hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
message String,
|
||||
files_added UInt32,
|
||||
files_deleted UInt32,
|
||||
files_renamed UInt32,
|
||||
files_modified UInt32,
|
||||
lines_added UInt32,
|
||||
lines_deleted UInt32,
|
||||
hunks_added UInt32,
|
||||
hunks_removed UInt32,
|
||||
hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
CREATE TABLE git.file_changes
|
||||
(
|
||||
change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6),
|
||||
path LowCardinality(String),
|
||||
old_path LowCardinality(String),
|
||||
file_extension LowCardinality(String),
|
||||
lines_added UInt32,
|
||||
lines_deleted UInt32,
|
||||
hunks_added UInt32,
|
||||
hunks_removed UInt32,
|
||||
hunks_changed UInt32,
|
||||
|
||||
commit_hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
commit_message String,
|
||||
commit_files_added UInt32,
|
||||
commit_files_deleted UInt32,
|
||||
commit_files_renamed UInt32,
|
||||
commit_files_modified UInt32,
|
||||
commit_lines_added UInt32,
|
||||
commit_lines_deleted UInt32,
|
||||
commit_hunks_added UInt32,
|
||||
commit_hunks_removed UInt32,
|
||||
commit_hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
CREATE TABLE git.line_changes
|
||||
(
|
||||
sign Int8,
|
||||
line_number_old UInt32,
|
||||
line_number_new UInt32,
|
||||
hunk_num UInt32,
|
||||
hunk_start_line_number_old UInt32,
|
||||
hunk_start_line_number_new UInt32,
|
||||
hunk_lines_added UInt32,
|
||||
hunk_lines_deleted UInt32,
|
||||
hunk_context LowCardinality(String),
|
||||
line LowCardinality(String),
|
||||
indent UInt8,
|
||||
line_type Enum('Empty' = 0, 'Comment' = 1, 'Punct' = 2, 'Code' = 3),
|
||||
|
||||
prev_commit_hash String,
|
||||
prev_author LowCardinality(String),
|
||||
prev_time DateTime,
|
||||
|
||||
file_change_type Enum('Add' = 1, 'Delete' = 2, 'Modify' = 3, 'Rename' = 4, 'Copy' = 5, 'Type' = 6),
|
||||
path LowCardinality(String),
|
||||
old_path LowCardinality(String),
|
||||
file_extension LowCardinality(String),
|
||||
file_lines_added UInt32,
|
||||
file_lines_deleted UInt32,
|
||||
file_hunks_added UInt32,
|
||||
file_hunks_removed UInt32,
|
||||
file_hunks_changed UInt32,
|
||||
|
||||
commit_hash String,
|
||||
author LowCardinality(String),
|
||||
time DateTime,
|
||||
commit_message String,
|
||||
commit_files_added UInt32,
|
||||
commit_files_deleted UInt32,
|
||||
commit_files_renamed UInt32,
|
||||
commit_files_modified UInt32,
|
||||
commit_lines_added UInt32,
|
||||
commit_lines_deleted UInt32,
|
||||
commit_hunks_added UInt32,
|
||||
commit_hunks_removed UInt32,
|
||||
commit_hunks_changed UInt32
|
||||
) ENGINE = MergeTree ORDER BY time;
|
||||
|
||||
Run the tool.
|
||||
|
||||
Then insert the data with the following commands:
|
||||
|
||||
clickhouse-client --query "INSERT INTO git.commits FORMAT TSV" < commits.tsv
|
||||
clickhouse-client --query "INSERT INTO git.file_changes FORMAT TSV" < file_changes.tsv
|
||||
clickhouse-client --query "INSERT INTO git.line_changes FORMAT TSV" < line_changes.tsv
|
||||
|
||||
|
||||
Usage: clickhouse git-import
|
||||
clickhouse git-import --skip-paths 'generated\.cpp|^(contrib|docs?|website|libs/(libcityhash|liblz4|libdivide|libvectorclass|libdouble-conversion|libcpuid|libzstd|libfarmhash|libmetrohash|libpoco|libwidechar_width))/' --skip-commits-with-messages '^Merge branch '
|
||||
|
||||
clickhouse-keeper-converter
|
||||
|
||||
Usage: clickhouse keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/data/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/data/version-2 --output-dir /var/lib/clickhouse/coordination/snapshots
|
||||
|
||||
clickhouse-static-files-disk-uploader
|
||||
|
||||
|
||||
clickhouse-su
|
||||
|
||||
Usage: clickhouse su user:group ...
|
||||
|
||||
clickhouse-disks
|
||||
|
||||
ClickHouse disk management tool
|
||||
Usage: clickhouse disks [OPTION]
|
||||
clickhouse-disks
|
||||
|
||||
read Read file `from_path` to `to_path` or to stdout
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
write Write File `from_path` or stdin to `to_path`
|
||||
|
||||
link Create hardlink from `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
mkdir Create directory or directories recursively
|
||||
|
||||
remove Remove file or directory with all children. Throws an exception if the file doesn't exist.
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
move Move file or directory from `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
copy Recursively copy data from `from_path` to `to_path`
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
list List files (the default disk is used unless --disk is specified)
|
||||
Path should be in format './' or './path' or 'path'
|
||||
|
||||
list-disks List disk names
|
||||
|
||||
clickhouse-disks:
|
||||
-h [ --help ] Print common help message
|
||||
-C [ --config-file ] arg
|
||||
Set config file
|
||||
--disk arg Set disk name
|
||||
--command_name arg
|
||||
Name of the command to run
|
||||
--save-logs Save logs to a file
|
||||
--log-level arg Logging level
|
||||
|
89
tests/queries/0_stateless/02598_clickhouse_help_program_options.sh
Executable file
@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env bash
|
||||
# Tags: no-parallel
|
||||
|
||||
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
# shellcheck source=../shell_config.sh
|
||||
. "$CURDIR"/../shell_config.sh
|
||||
|
||||
|
||||
# We have to use a fixed terminal width. It may break the formatting of other tests' results.
|
||||
# In CI there is no tty and we just ignore failed stty calls.
|
||||
# Use a fixed width so the help output wraps the same way as in the reference file.
|
||||
backup_stty_size=$(stty size 2>/dev/null | awk '{print $2}' ||:)
|
||||
stty columns 60 2>/dev/null ||:
|
||||
|
||||
echo "================BINARY=========================="
|
||||
|
||||
echo -e "\nclickhouse --help\n"
|
||||
$CLICKHOUSE_BINARY --help
|
||||
echo -e "\nclickhouse help\n"
|
||||
$CLICKHOUSE_BINARY help
|
||||
|
||||
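# The generated options lists vary between builds, so strip them (everything from the 'Allowed options:' / 'Main options:' header up to the following blank line) and compare only the stable help text.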
echo -e "\nclickhouse benchmark\n"
|
||||
$CLICKHOUSE_BINARY benchmark --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse client\n"
|
||||
$CLICKHOUSE_BINARY client --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse local\n"
|
||||
$CLICKHOUSE_BINARY local --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse compressor\n"
|
||||
$CLICKHOUSE_BINARY compressor --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse disks\n"
|
||||
$CLICKHOUSE_BINARY disks --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse extract\n"
|
||||
$CLICKHOUSE_BINARY extract-from-config --help
|
||||
echo -e "\nclickhouse format\n"
|
||||
$CLICKHOUSE_BINARY format --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse git-import\n"
|
||||
$CLICKHOUSE_BINARY git-import --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse install\n"
|
||||
$CLICKHOUSE_BINARY install --help | perl -ne "s/sudo clickhouse/clickhouse/g; print;"
|
||||
echo -e "\nclickhouse keeper-converter\n"
|
||||
$CLICKHOUSE_BINARY keeper-converter --help
|
||||
echo -e "\nclickhouse obfuscator\n"
|
||||
$CLICKHOUSE_BINARY obfuscator --help | perl -0777 -pe 's/Options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse static\n"
|
||||
$CLICKHOUSE_BINARY static-files-disk-uploader --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
|
||||
|
||||
|
||||
echo -e "\nclickhouse start\n"
|
||||
$CLICKHOUSE_BINARY start --help | perl -ne "s/sudo clickhouse/clickhouse/g; print;"
|
||||
echo -e "\nclickhouse stop\n"
|
||||
$CLICKHOUSE_BINARY stop --help | perl -ne "s/sudo clickhouse/clickhouse/g; print;"
|
||||
echo -e "\nclickhouse status\n"
|
||||
$CLICKHOUSE_BINARY status --help | perl -ne "s/sudo clickhouse/clickhouse/g; print;"
|
||||
echo -e "\nclickhouse restart\n"
|
||||
$CLICKHOUSE_BINARY restart --help | perl -ne "s/sudo clickhouse/clickhouse/g; print;"
|
||||
echo -e "\nclickhouse su\n"
|
||||
$CLICKHOUSE_BINARY su --help
|
||||
echo -e "\nclickhouse hash\n"
|
||||
$CLICKHOUSE_BINARY hash-binary --help | grep -v 'Current binary hash'
|
||||
|
||||
echo "================SYMLINK=============================="
|
||||
|
||||
echo -e "\nclickhouse-local\n"
|
||||
${CLICKHOUSE_BINARY}-local --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-client\n"
|
||||
${CLICKHOUSE_BINARY}-client --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-benchmark\n"
|
||||
${CLICKHOUSE_BINARY}-benchmark --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-extract\n"
|
||||
${CLICKHOUSE_BINARY}-extract-from-config --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-compressor\n"
|
||||
${CLICKHOUSE_BINARY}-compressor --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-format\n"
|
||||
${CLICKHOUSE_BINARY}-format --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-obfuscator\n"
|
||||
${CLICKHOUSE_BINARY}-obfuscator --help | perl -0777 -pe 's/Options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-git-import\n"
|
||||
${CLICKHOUSE_BINARY}-git-import --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-keeper-converter\n"
|
||||
${CLICKHOUSE_BINARY}-keeper-converter --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-static-files-disk-uploader\n"
|
||||
${CLICKHOUSE_BINARY}-static-files-disk-uploader --help | perl -0777 -pe 's/Allowed options:.*\n\n//igs'
|
||||
echo -e "\nclickhouse-su\n"
|
||||
${CLICKHOUSE_BINARY}-su --help
|
||||
echo -e "\nclickhouse-disks\n"
|
||||
${CLICKHOUSE_BINARY}-disks --help | perl -0777 -pe 's/Main options:.*\n\n//igs'
|
||||
|
||||
stty columns $backup_stty_size 2>/dev/null ||:
|