Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-26 01:22:04 +00:00

Commit 1b4166a179: resolved conflict and added example to RU version

.gitmodules (vendored, 6 changed lines)
@@ -197,8 +197,7 @@
url = https://github.com/danlark1/miniselect
[submodule "contrib/rocksdb"]
path = contrib/rocksdb
url = https://github.com/facebook/rocksdb
branch = v6.14.5
url = https://github.com/ClickHouse-Extras/rocksdb.git
[submodule "contrib/xz"]
path = contrib/xz
url = https://github.com/xz-mirror/xz
@@ -209,3 +208,6 @@
[submodule "contrib/dragonbox"]
path = contrib/dragonbox
url = https://github.com/ClickHouse-Extras/dragonbox.git
[submodule "contrib/fast_float"]
path = contrib/fast_float
url = https://github.com/fastfloat/fast_float
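The hunks above point the rocksdb submodule at the ClickHouse-Extras fork and register the new dragonbox and fast_float submodules. For an existing checkout, a typical way to pick up these changes (essentially the command suggested by the new cmake/find/fast_float.cmake check further below) would be a sketch like this:

``` bash
# Re-read .gitmodules and fetch the submodules touched by this commit.
# Assumes the working directory is the repository root.
git submodule sync
git submodule update --init --recursive contrib/rocksdb contrib/dragonbox contrib/fast_float
```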
@@ -223,8 +223,8 @@ if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()

if (UNBUNDLED AND (COMPILER_GCC OR COMPILER_CLANG))
# to make numeric_limits<__int128> works for unbundled build
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "-std=gnu++2a")
else()
set (_CXX_STANDARD "-std=c++2a")
@@ -457,6 +457,7 @@ include (cmake/find/s3.cmake)
include (cmake/find/base64.cmake)
include (cmake/find/parquet.cmake)
include (cmake/find/simdjson.cmake)
include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
@@ -58,8 +58,7 @@ public:
using signed_base_type = int64_t;

// ctors
integer() = default;

constexpr integer() noexcept;
template <typename T>
constexpr integer(T rhs) noexcept;
template <typename T>
@@ -916,6 +916,11 @@ public:

// Members

template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed>::integer() noexcept
: items{}
{}

template <size_t Bits, typename Signed>
template <typename T>
constexpr integer<Bits, Signed>::integer(T rhs) noexcept
@@ -761,14 +761,14 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
static KillingErrorHandler killing_error_handler;
Poco::ErrorHandler::set(&killing_error_handler);

signal_pipe.setNonBlocking();
signal_pipe.setNonBlockingWrite();
signal_pipe.tryIncreaseSize(1 << 20);

signal_listener = std::make_unique<SignalListener>(*this);
signal_listener_thread.start(*signal_listener);

#if defined(__ELF__) && !defined(__FreeBSD__)
String build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
String build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
if (build_id_hex.empty())
build_id_info = "no build id";
else
@@ -179,7 +179,7 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta
sentry_set_extra("signal_number", sentry_value_new_int32(sig));

#if defined(__ELF__) && !defined(__FreeBSD__)
const String & build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
const String & build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
sentry_set_tag("build_id", build_id_hex.c_str());
#endif
@@ -248,7 +248,7 @@ bool Pool::Entry::tryForceConnected() const
if (prev_connection_id != current_connection_id)
{
auto & logger = Poco::Util::Application::instance().logger();
logger.information("Connection to mysql server has been reestablished. Connection id changed: %d -> %d",
logger.information("Connection to mysql server has been reestablished. Connection id changed: %lu -> %lu",
prev_connection_id, current_connection_id);
}
return true;
@@ -22,4 +22,12 @@ ResultBase::~ResultBase()
mysql_free_result(res);
}

std::string ResultBase::getFieldName(size_t n) const
{
if (num_fields <= n)
throw Exception(std::string("Unknown column position ") + std::to_string(n));

return fields[n].name;
}

}
@@ -31,6 +31,8 @@ public:
MYSQL_RES * getRes() { return res; }
const Query * getQuery() const { return query; }

std::string getFieldName(size_t n) const;

virtual ~ResultBase();

protected:
cmake/find/fast_float.cmake (new file, 6 lines)
@@ -0,0 +1,6 @@
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fast_float/include/fast_float/fast_float.h")
message (FATAL_ERROR "submodule contrib/fast_float is missing. to fix try run: \n git submodule update --init --recursive")
endif ()

set(FAST_FLOAT_LIBRARY fast_float)
set(FAST_FLOAT_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/fast_float/include/")
contrib/CMakeLists.txt (vendored, 1 changed line)
@@ -323,3 +323,4 @@ if (USE_INTERNAL_ROCKSDB_LIBRARY)
endif()

add_subdirectory(dragonbox)
add_subdirectory(fast_float)

contrib/boost (vendored, 2 changed lines)
@@ -1 +1 @@
Subproject commit a04e72c0464f0c31d3384f18f0c0db36a05538e0
Subproject commit a7ceabe4747ecc3309dd3dcd9de4b29660dfd298

contrib/fast_float (new submodule, vendored)
@@ -0,0 +1 @@
Subproject commit 7eae925b51fd0f570ccd5c880c12e3e27a23b86f

contrib/poco (vendored, 2 changed lines)
@@ -1 +1 @@
Subproject commit f3d791f6568b99366d089b4479f76a515beb66d5
Subproject commit b5523bb9b4bc4239640cbfec4d734be8b8585639

contrib/rocksdb (vendored, 2 changed lines)
@@ -1 +1 @@
Subproject commit 35d8e36ef1b8e3e0759ca81215f855226a0a54bd
Subproject commit 8b966f0ca298fc1475bd09d9775f32dff0fdce0a
@@ -345,6 +345,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
@@ -460,6 +461,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
@@ -583,8 +585,9 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
@@ -592,7 +595,6 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_lock_mgr.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
@@ -155,6 +155,7 @@ function clone_submodules
contrib/miniselect
contrib/xz
contrib/dragonbox
contrib/fast_float
)

git submodule sync
@@ -318,6 +319,9 @@ function run_tests
01545_system_errors
# Checks system.errors
01563_distributed_query_finish

# nc - command not found
01601_proxy_protocol
)

time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
@@ -7,6 +7,11 @@ trap 'kill $(jobs -pr) ||:' EXIT
stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

# upstream/master
LEFT_SERVER_PORT=9001
# patched version
RIGHT_SERVER_PORT=9002

function wait_for_server # port, pid
{
for _ in {1..60}
@@ -37,25 +42,32 @@ function configure
rm right/config/config.d/text_log.xml ||:
cp -rv right/config left ||:

sed -i 's/<tcp_port>900./<tcp_port>9001/g' left/config/config.xml
sed -i 's/<tcp_port>900./<tcp_port>9002/g' right/config/config.xml

# Start a temporary server to rename the tables
while killall clickhouse-server; do echo . ; sleep 1 ; done
echo all killed

set -m # Spawn temporary in its own process groups
left/clickhouse-server --config-file=left/config/config.xml -- --path db0 --user_files_path db0/user_files &> setup-server-log.log &

local setup_left_server_opts=(
# server options
--config-file=left/config/config.xml
--
# server *config* directives overrides
--path db0
--user_files_path db0/user_files
--tcp_port $LEFT_SERVER_PORT
)
left/clickhouse-server "${setup_left_server_opts[@]}" &> setup-server-log.log &
left_pid=$!
kill -0 $left_pid
disown $left_pid
set +m

wait_for_server 9001 $left_pid
wait_for_server $LEFT_SERVER_PORT $left_pid
echo Server for setup started

clickhouse-client --port 9001 --query "create database test" ||:
clickhouse-client --port 9001 --query "rename table datasets.hits_v1 to test.hits" ||:
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:

while killall clickhouse-server; do echo . ; sleep 1 ; done
echo all killed
@@ -83,16 +95,30 @@ function restart

set -m # Spawn servers in their own process groups

left/clickhouse-server --config-file=left/config/config.xml \
-- --path left/db --user_files_path left/db/user_files \
&>> left-server-log.log &
local left_server_opts=(
# server options
--config-file=left/config/config.xml
--
# server *config* directives overrides
--path left/db
--user_files_path left/db/user_files
--tcp_port $LEFT_SERVER_PORT
)
left/clickhouse-server "${left_server_opts[@]}" &>> left-server-log.log &
left_pid=$!
kill -0 $left_pid
disown $left_pid

right/clickhouse-server --config-file=right/config/config.xml \
-- --path right/db --user_files_path right/db/user_files \
&>> right-server-log.log &
local right_server_opts=(
# server options
--config-file=right/config/config.xml
--
# server *config* directives overrides
--path right/db
--user_files_path right/db/user_files
--tcp_port $RIGHT_SERVER_PORT
)
right/clickhouse-server "${right_server_opts[@]}" &>> right-server-log.log &
right_pid=$!
kill -0 $right_pid
disown $right_pid
@@ -101,16 +127,16 @@ function restart

unset MALLOC_CONF

wait_for_server 9001 $left_pid
wait_for_server $LEFT_SERVER_PORT $left_pid
echo left ok

wait_for_server 9002 $right_pid
wait_for_server $RIGHT_SERVER_PORT $right_pid
echo right ok

clickhouse-client --port 9001 --query "select * from system.tables where database != 'system'"
clickhouse-client --port 9001 --query "select * from system.build_options"
clickhouse-client --port 9002 --query "select * from system.tables where database != 'system'"
clickhouse-client --port 9002 --query "select * from system.build_options"
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'"
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"

# Check again that both servers we started are running -- this is important
# for running locally, when there might be some other servers started and we
@@ -199,9 +225,9 @@ function run_tests
for test in $test_files
do
# Check that both servers are alive, and restart them if they die.
clickhouse-client --port 9001 --query "select 1 format Null" \
clickhouse-client --port $LEFT_SERVER_PORT --query "select 1 format Null" \
|| { echo $test_name >> left-server-died.log ; restart ; }
clickhouse-client --port 9002 --query "select 1 format Null" \
clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1 format Null" \
|| { echo $test_name >> right-server-died.log ; restart ; }

test_name=$(basename "$test" ".xml")
@@ -215,7 +241,7 @@ function run_tests
# The grep is to filter out set -x output and keep only time output.
# The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
{ \
time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 \
time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
--runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
--profile-seconds "$profile_seconds" \
-- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
@@ -257,36 +283,36 @@ function get_profiles_watchdog
function get_profiles
{
# Collect the profiles
clickhouse-client --port 9001 --query "set query_profiler_cpu_time_period_ns = 0"
clickhouse-client --port 9001 --query "set query_profiler_real_time_period_ns = 0"
clickhouse-client --port 9001 --query "system flush logs" &
clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
clickhouse-client --port $LEFT_SERVER_PORT --query "system flush logs" &

clickhouse-client --port 9002 --query "set query_profiler_cpu_time_period_ns = 0"
clickhouse-client --port 9002 --query "set query_profiler_real_time_period_ns = 0"
clickhouse-client --port 9002 --query "system flush logs" &
clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
clickhouse-client --port $RIGHT_SERVER_PORT --query "system flush logs" &

wait

clickhouse-client --port 9001 --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port 9001 --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port 9001 --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port 9001 --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port 9001 --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port 9001 --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &

clickhouse-client --port 9002 --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port 9002 --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port 9002 --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port 9002 --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
clickhouse-client --port 9002 --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
clickhouse-client --port 9002 --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > right-async-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > right-async-metric-log.tsv ||: &

wait

# Just check that the servers are alive so that we return a proper exit code.
# We don't consistently check the return codes of the above background jobs.
clickhouse-client --port 9001 --query "select 1"
clickhouse-client --port 9002 --query "select 1"
clickhouse-client --port $LEFT_SERVER_PORT --query "select 1"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
}

function build_log_column_definitions
@@ -2,6 +2,7 @@
<http_port remove="remove"/>
<mysql_port remove="remove"/>
<interserver_http_port remove="remove"/>
<tcp_with_proxy_port remove="remove"/>
<listen_host>::</listen_host>

<logger>
@@ -117,8 +117,8 @@ You can compile packages and install them or use programs without installing pac

You’ll need to create a data and metadata folders and `chown` them for the desired user. Their paths can be changed in server config (src/programs/server/config.xml), by default they are:

/opt/clickhouse/data/default/
/opt/clickhouse/metadata/default/
/var/lib/clickhouse/data/default/
/var/lib/clickhouse/metadata/default/

On Gentoo, you can just use `emerge clickhouse` to install ClickHouse from sources.
@@ -113,7 +113,8 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
- `--user, -u` – The username. Default value: default.
- `--password` – The password. Default value: empty string.
- `--query, -q` – The query to process when using non-interactive mode.
- `--query, -q` – The query to process when using non-interactive mode. You must specify either `query` or `queries-file` option.
- `--queries-file, -qf` - file path with queries to execute. You must specify either `query` or `queries-file` option.
- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.
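The hunk above documents the new `--queries-file, -qf` client option. A small usage sketch (the file name and its contents are invented for illustration):

``` bash
# Run several statements from a file instead of passing a single --query string.
cat > queries.sql <<'EOF'
SELECT 1;
SELECT version();
EOF
clickhouse-client --queries-file queries.sql
```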
@@ -457,7 +457,10 @@ This format is only appropriate for outputting a query result, but not for parsi

ClickHouse supports [NULL](../sql-reference/syntax.md), which is displayed as `null` in the JSON output. To enable `+nan`, `-nan`, `+inf`, `-inf` values in output, set the [output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals) to 1.

See also the [JSONEachRow](#jsoneachrow) format.
**See Also**

- [JSONEachRow](#jsoneachrow) format
- [output_format_json_array_of_rows](../operations/settings/settings.md#output-format-json-array-of-rows) setting

## JSONString {#jsonstring}
@@ -48,6 +48,7 @@ toc_title: Adopters
| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
| <a href="https://www.genotek.ru/" class="favicon">Genotek</a> | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| <a href="https://www.the-ica.com/" class="favicon">ICA</a> | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
@@ -2395,6 +2395,54 @@ My NULL
My NULL
```

## output_format_json_array_of_rows {#output-format-json-array-of-rows}

Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md#jsoneachrow) format.

Possible values:

- 1 — ClickHouse outputs all rows as an array, each row in the `JSONEachRow` format.
- 0 — ClickHouse outputs each row separately in the `JSONEachRow` format.

Default value: `0`.

**Example of a query with the enabled setting**

Query:

```sql
SET output_format_json_array_of_rows = 1;
SELECT number FROM numbers(3) FORMAT JSONEachRow;
```

Result:

```text
[
{"number":"0"},
{"number":"1"},
{"number":"2"}
]
```

**Example of a query with the disabled setting**

Query:

```sql
SET output_format_json_array_of_rows = 0;
SELECT number FROM numbers(3) FORMAT JSONEachRow;
```

Result:

```text
{"number":"0"}
{"number":"1"}
{"number":"2"}
```

=======
## allow_nullable_key {#allow-nullable-key}

Allows using of the [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable)-typed values in a sorting and a primary key for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) tables.
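The `allow_nullable_key` entry added above only describes what the setting permits. A minimal sketch of how it might be used, assuming the setting is applied per table through the `SETTINGS` clause (table name and schema are invented):

``` bash
# Hypothetical table whose sorting/primary key column is Nullable.
clickhouse-client --query "
    CREATE TABLE nullable_key_demo
    (
        id Nullable(UInt64),
        value String
    )
    ENGINE = MergeTree
    ORDER BY id
    SETTINGS allow_nullable_key = 1"
```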
@@ -9,6 +9,12 @@ Connects to a ClickHouse server and repeatedly sends specified queries.

Syntax:

``` bash
$ clickhouse-benchmark --query ["single query"] [keys]
```

or

``` bash
$ echo "single query" | clickhouse-benchmark [keys]
```
@@ -34,6 +40,7 @@ clickhouse-benchmark [keys] < queries_file

## Keys {#clickhouse-benchmark-keys}

- `--query=WORD` - Query to execute. If this parameter is not passed clickhouse-benchmark will read queries from standard input.
- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
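The new `--query` key documented above can replace the stdin form shown in the Syntax section. A brief usage sketch (query text and key values are arbitrary):

``` bash
# Repeatedly send one query from 4 concurrent connections,
# printing an intermediate report every second.
clickhouse-benchmark --query "SELECT count() FROM numbers(1000000)" -c 4 -d 1
```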
@@ -32,7 +32,8 @@ Arguments:
- `-S`, `--structure` — table structure for input data.
- `-if`, `--input-format` — input format, `TSV` by default.
- `-f`, `--file` — path to data, `stdin` by default.
- `-q` `--query` — queries to execute with `;` as delimeter.
- `-q` `--query` — queries to execute with `;` as delimeter. You must specify either `query` or `queries-file` option.
- `-qf` `--queries-file` - file path with queries to execute. You must specify either `query` or `queries-file` option.
- `-N`, `--table` — table name where to put output data, `table` by default.
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `--stacktrace` — whether to dump debug output in case of exception.
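Analogous to the client change above, `clickhouse-local` gains a `-qf`/`--queries-file` option. A hedged sketch of how it might be combined with piped input (file name and structure are invented):

``` bash
# local_queries.sql is a hypothetical file containing e.g.: SELECT count(*) FROM table;
echo -e "1\n2\n3" | clickhouse-local --structure "x UInt32" --queries-file local_queries.sql
```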
@ -692,3 +692,147 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
|
||||
│ 2009-02-11 14:42:23 │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## toModifiedJulianDay {#tomodifiedjulianday}
|
||||
|
||||
Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports date from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or the date is invalid.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
toModifiedJulianDay(date)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `date` — Date in text form. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Modified Julian Day number.
|
||||
|
||||
Type: [Int32](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT toModifiedJulianDay('2020-01-01');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─toModifiedJulianDay('2020-01-01')─┐
|
||||
│ 58849 │
|
||||
└───────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toModifiedJulianDayOrNull {#tomodifiedjuliandayornull}
|
||||
|
||||
Similar to [toModifiedJulianDay()](#tomodifiedjulianday), but instead of raising exceptions it returns `NULL`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
toModifiedJulianDayOrNull(date)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `date` — Date in text form. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Modified Julian Day number.
|
||||
|
||||
Type: [Nullable(Int32)](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT toModifiedJulianDayOrNull('2020-01-01');
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─toModifiedJulianDayOrNull('2020-01-01')─┐
|
||||
│ 58849 │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## fromModifiedJulianDay {#frommodifiedjulianday}
|
||||
|
||||
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day number from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
fromModifiedJulianDay(day)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `day` — Modified Julian Day number. [Any integral types](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Date in text form.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT fromModifiedJulianDay(58849);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─fromModifiedJulianDay(58849)─┐
|
||||
│ 2020-01-01 │
|
||||
└──────────────────────────────┘
|
||||
```
|
||||
|
||||
## fromModifiedJulianDayOrNull {#frommodifiedjuliandayornull}
|
||||
|
||||
Similar to [fromModifiedJulianDayOrNull()](#frommodifiedjuliandayornull), but instead of raising exceptions it returns `NULL`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
fromModifiedJulianDayOrNull(day)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `day` — Modified Julian Day number. [Any integral types](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Date in text form.
|
||||
|
||||
Type: [Nullable(String)](../../sql-reference/data-types/string.md)
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT fromModifiedJulianDayOrNull(58849);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─fromModifiedJulianDayOrNull(58849)─┐
|
||||
│ 2020-01-01 │
|
||||
└────────────────────────────────────┘
|
||||
```
|
||||
|
@ -111,4 +111,306 @@ Accepts a numeric argument and returns a UInt64 number close to 2 to the power o
|
||||
|
||||
Accepts a numeric argument and returns a UInt64 number close to 10 to the power of x.
|
||||
|
||||
## cosh(x) {#coshx}
|
||||
|
||||
[Hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
cosh(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Values from the interval: `1 <= cosh(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT cosh(0);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─cosh(0)──┐
|
||||
│ 1 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## acosh(x) {#acoshx}
|
||||
|
||||
[Inverse hyperbolic cosine](https://www.mathworks.com/help/matlab/ref/acosh.html).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
acosh(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — Hyperbolic cosine of angle. Values from the interval: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The angle, in radians. Values from the interval: `0 <= acosh(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT acosh(1);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─acosh(1)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [cosh(x)](../../sql-reference/functions/math-functions.md#coshx)
|
||||
|
||||
## sinh(x) {#sinhx}
|
||||
|
||||
[Hyperbolic sine](https://www.mathworks.com/help/matlab/ref/sinh.html).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
sinh(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Values from the interval: `-∞ < sinh(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT sinh(0);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─sinh(0)──┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## asinh(x) {#asinhx}
|
||||
|
||||
[Inverse hyperbolic sine](https://www.mathworks.com/help/matlab/ref/asinh.html).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
asinh(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — Hyperbolic sine of angle. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The angle, in radians. Values from the interval: `-∞ < asinh(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT asinh(0);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─asinh(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [sinh(x)](../../sql-reference/functions/math-functions.md#sinhx)
|
||||
|
||||
## atanh(x) {#atanhx}
|
||||
|
||||
[Inverse hyperbolic tangent](https://www.mathworks.com/help/matlab/ref/atanh.html).
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
atanh(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — Hyperbolic tangent of angle. Values from the interval: `–1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The angle, in radians. Values from the interval: `-∞ < atanh(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT atanh(0);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─atanh(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## atan2(y, x) {#atan2yx}
|
||||
|
||||
The [function](https://en.wikipedia.org/wiki/Atan2) calculates the angle in the Euclidean plane, given in radians, between the positive x axis and the ray to the point `(x, y) ≠ (0, 0)`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
atan2(y, x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `y` — y-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — x-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The angle `θ` such that `−π < θ ≤ π`, in radians.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT atan2(1, 1);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌────────atan2(1, 1)─┐
|
||||
│ 0.7853981633974483 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## hypot(x, y) {#hypotxy}
|
||||
|
||||
Calculates the length of the hypotenuse of a right-angle triangle. The [function](https://en.wikipedia.org/wiki/Hypot) avoids problems that occur when squaring very large or very small numbers.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
hypot(x, y)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — The first cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `y` — The second cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The length of the hypotenuse of a right-angle triangle.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT hypot(1, 1);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌────────hypot(1, 1)─┐
|
||||
│ 1.4142135623730951 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## log1p(x) {#log1px}
|
||||
|
||||
Calculates `log(1+x)`. The [function](https://en.wikipedia.org/wiki/Natural_logarithm#lnp1) `log1p(x)` is more accurate than `log(1+x)` for small values of x.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
log1p(x)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `x` — Values from the interval: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Values from the interval: `-∞ < log1p(x) < +∞`.
|
||||
|
||||
Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT log1p(0);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─log1p(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [log(x)](../../sql-reference/functions/math-functions.md#logx-lnx)
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) <!--hide-->
|
||||
|
@@ -11,7 +11,7 @@ Syntax:

``` sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
[KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
[KEYED BY {'none' | 'user name' | 'ip address' | 'forwarded ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
[FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
{MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
NO LIMITS | TRACKING ONLY} [,...]]
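The quota grammar above adds `'forwarded ip address'` as a new quota key. A hypothetical example of creating such a quota (name and limits are invented; this key is typically useful when clients connect through a proxy that forwards the original address):

``` bash
clickhouse-client --query "
    CREATE QUOTA IF NOT EXISTS proxy_client_quota
    KEYED BY 'forwarded ip address'
    FOR INTERVAL 1 HOUR MAX QUERIES = 1000"
```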
@ -434,7 +434,10 @@ JSON совместим с JavaScript. Для этого, дополнитель
|
||||
|
||||
ClickHouse поддерживает [NULL](../sql-reference/syntax.md), который при выводе JSON будет отображен как `null`. Чтобы включить отображение в результате значений `+nan`, `-nan`, `+inf`, `-inf`, установите параметр [output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals) равным 1.
|
||||
|
||||
Смотрите также формат [JSONEachRow](#jsoneachrow).
|
||||
**Смотрите также**
|
||||
|
||||
- Формат [JSONEachRow](#jsoneachrow)
|
||||
- Настройка [output_format_json_array_of_rows](../operations/settings/settings.md#output-format-json-array-of-rows)
|
||||
|
||||
## JSONString {#jsonstring}
|
||||
|
||||
|
@ -2235,6 +2235,84 @@ SELECT CAST(toNullable(toInt32(0)) AS Int32) as x, toTypeName(x);
|
||||
|
||||
Значение по умолчанию: `\N`.
|
||||
|
||||
**Примеры**
|
||||
|
||||
Запрос
|
||||
|
||||
```sql
|
||||
SELECT * FROM tsv_custom_null FORMAT TSV;
|
||||
```
|
||||
|
||||
Результат
|
||||
|
||||
```text
|
||||
788
|
||||
\N
|
||||
\N
|
||||
```
|
||||
|
||||
Запрос
|
||||
|
||||
```sql
|
||||
SET output_format_tsv_null_representation = 'My NULL';
|
||||
SELECT * FROM tsv_custom_null FORMAT TSV;
|
||||
```
|
||||
|
||||
Результат
|
||||
|
||||
```text
|
||||
788
|
||||
My NULL
|
||||
My NULL
|
||||
```
|
||||
|
||||
## output_format_json_array_of_rows {#output-format-json-array-of-rows}
|
||||
|
||||
Позволяет выводить все строки в виде массива JSON в формате [JSONEachRow](../../interfaces/formats.md#jsoneachrow).
|
||||
|
||||
Возможные значения:
|
||||
|
||||
- 1 — ClickHouse выводит все строки в виде массива и при этом каждую строку в формате `JSONEachRow`.
|
||||
- 0 — ClickHouse выводит каждую строку отдельно в формате `JSONEachRow`.
|
||||
|
||||
Значение по умолчанию: `0`.
|
||||
|
||||
**Пример запроса с включенной настройкой**
|
||||
|
||||
Запрос:
|
||||
|
||||
```sql
|
||||
SET output_format_json_array_of_rows = 1;
|
||||
SELECT number FROM numbers(3) FORMAT JSONEachRow;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
```text
|
||||
[
|
||||
{"number":"0"},
|
||||
{"number":"1"},
|
||||
{"number":"2"}
|
||||
]
|
||||
```
|
||||
|
||||
**Пример запроса с отключенной настройкой**
|
||||
|
||||
Запрос:
|
||||
|
||||
```sql
|
||||
SET output_format_json_array_of_rows = 0;
|
||||
SELECT number FROM numbers(3) FORMAT JSONEachRow;
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
```text
|
||||
{"number":"0"}
|
||||
{"number":"1"}
|
||||
{"number":"2"}
|
||||
```
|
||||
|
||||
## allow_nullable_key {#allow-nullable-key}
|
||||
|
||||
Включает или отключает поддержку типа [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable) для ключей таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree).
|
||||
|
@ -103,4 +103,306 @@ SELECT erf(3 / sqrt(2))
|
||||
|
||||
Принимает два числовых аргумента x и y. Возвращает число типа Float64, близкое к x в степени y.
|
||||
|
||||
## cosh(x) {#coshx}
|
||||
|
||||
[Гиперболический косинус](https://help.scilab.org/docs/5.4.0/ru_RU/cosh.html).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
cosh(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — угол в радианах. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Значения из интервала: `1 <= cosh(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT cosh(0);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─cosh(0)──┐
|
||||
│ 1 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## acosh(x) {#acoshx}
|
||||
|
||||
[Обратный гиперболический косинус](https://help.scilab.org/docs/5.4.0/ru_RU/acosh.html).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
acosh(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — гиперболический косинус угла. Значения из интервала: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Угол в радианах. Значения из интервала: `0 <= acosh(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT acosh(1);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─acosh(1)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [cosh(x)](../../sql-reference/functions/math-functions.md#coshx)
|
||||
|
||||
## sinh(x) {#sinhx}
|
||||
|
||||
[Гиперболический синус](https://help.scilab.org/docs/5.4.0/ru_RU/sinh.html).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
sinh(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — угол в радианах. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Значения из интервала: `-∞ < sinh(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT sinh(0);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─sinh(0)──┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## asinh(x) {#asinhx}
|
||||
|
||||
[Обратный гиперболический синус](https://help.scilab.org/docs/5.4.0/ru_RU/asinh.html).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
asinh(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — гиперболический синус угла. Значения из интервала: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Угол в радианах. Значения из интервала: `-∞ < asinh(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT asinh(0);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─asinh(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [sinh(x)](../../sql-reference/functions/math-functions.md#sinhx)
|
||||
|
||||
## atanh(x) {#atanhx}
|
||||
|
||||
[Обратный гиперболический тангенс](https://help.scilab.org/docs/5.4.0/ru_RU/atanh.html).
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
atanh(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — гиперболический тангенс угла. Значения из интервала: `–1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Угол в радианах. Значения из интервала: `-∞ < atanh(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT atanh(0);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─atanh(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
## atan2(y, x) {#atan2yx}
|
||||
|
||||
[Функция](https://msoffice-prowork.com/ref/excel/excelfunc/math/atan2/) вычисляет угол в радианах между положительной осью x и линией, проведенной из начала координат в точку `(x, y) ≠ (0, 0)`.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
atan2(y, x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `y` — координата y точки, в которую проведена линия. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `x` — координата х точки, в которую проведена линия. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Угол `θ` в радианах из интервала: `−π < θ ≤ π`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT atan2(1, 1);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌────────atan2(1, 1)─┐
|
||||
│ 0.7853981633974483 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## hypot(x, y) {#hypotxy}
|
||||
|
||||
Вычисляет длину гипотенузы прямоугольного треугольника. При использовании этой [функции](https://php.ru/manual/function.hypot.html) не возникает проблем при возведении в квадрат очень больших или очень малых чисел.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
hypot(x, y)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — первый катет прямоугольного треугольника. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
- `y` — второй катет прямоугольного треугольника. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Длина гипотенузы прямоугольного треугольника.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT hypot(1, 1);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌────────hypot(1, 1)─┐
|
||||
│ 1.4142135623730951 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## log1p(x) {#log1px}
|
||||
|
||||
Вычисляет `log(1+x)`. [Функция](https://help.scilab.org/docs/6.0.1/ru_RU/log1p.html) `log1p(x)` является более точной, чем функция `log(1+x)` для малых значений x.
|
||||
|
||||
**Синтаксис**
|
||||
|
||||
``` sql
|
||||
log1p(x)
|
||||
```
|
||||
|
||||
**Параметры**
|
||||
|
||||
- `x` — значения из интервала: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
||||
- Значения из интервала: `-∞ < log1p(x) < +∞`.
|
||||
|
||||
Тип: [Float64](../../sql-reference/data-types/float.md#float32-float64).
|
||||
|
||||
**Пример**
|
||||
|
||||
Запрос:
|
||||
|
||||
``` sql
|
||||
SELECT log1p(0);
|
||||
```
|
||||
|
||||
Результат:
|
||||
|
||||
``` text
|
||||
┌─log1p(0)─┐
|
||||
│ 0 │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [log(x)](../../sql-reference/functions/math-functions.md#logx)
|
||||
|
||||
[Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/functions/math_functions/) <!--hide-->
|
||||
|
@ -46,7 +46,7 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)
|
||||
- 处理单个查询时需要高吞吐量(每台服务器每秒可达数十亿行)
|
||||
- 事务不是必须的
|
||||
- 对数据一致性要求低
|
||||
- 每个查询有一个大表。除了他意以外,其他的都很小。
|
||||
- 每个查询有一个大表。除了他以外,其他的都很小。
|
||||
- 查询结果明显小于源数据。换句话说,数据经过过滤或聚合,因此结果适合于单个服务器的RAM中
|
||||
|
||||
很容易可以看出,OLAP场景与其他通常业务场景(例如,OLTP或K/V)有很大的不同, 因此想要使用OLTP或Key-Value数据库去高效的处理分析查询场景,并不是非常完美的适用方案。例如,使用OLAP数据库去处理分析请求通常要优于使用MongoDB或Redis去处理分析请求。
|
||||
|
@ -1,71 +1,105 @@
|
||||
# 命令行客户端 {#ming-ling-xing-ke-hu-duan}
|
||||
---
|
||||
toc_priority: 17
|
||||
toc_title: 命令行客户端
|
||||
---
|
||||
|
||||
通过命令行来访问 ClickHouse,您可以使用 `clickhouse-client`
|
||||
# 命令行客户端 {#command-line-client}
|
||||
|
||||
ClickHouse提供了一个原生命令行客户端`clickhouse-client`客户端支持命令行支持的更多信息详见[Configuring](#interfaces_cli_configuration)。
|
||||
|
||||
[安装部署](../getting-started/index.md)后,系统默认会安装`clickhouse-client`(同时它属于`clickhouse-client`安装包中)。
|
||||
|
||||
``` bash
|
||||
$ clickhouse-client
|
||||
ClickHouse client version 0.0.26176.
|
||||
Connecting to localhost:9000.
|
||||
Connected to ClickHouse server version 0.0.26176.:)
|
||||
ClickHouse client version 19.17.1.1579 (official build).
|
||||
Connecting to localhost:9000 as user default.
|
||||
Connected to ClickHouse server version 19.17.1 revision 54428.
|
||||
|
||||
:)
|
||||
```
|
||||
|
||||
该客户端支持命令行参数以及配置文件。查看更多,请看 «[配置](#interfaces_cli_configuration)»
|
||||
不同的客户端和服务器版本彼此兼容,但是一些特性可能在旧客户机中不可用。我们建议使用与服务器应用相同版本的客户端。当你尝试使用旧版本的客户端时,服务器上的`clickhouse-client`会显示如下信息:
|
||||
|
||||
## 使用方式 {#shi-yong-fang-shi}
|
||||
ClickHouse client version is older than ClickHouse server. It may lack support for new features.
|
||||
|
||||
这个客户端可以选择使用交互式与非交互式(批量)两种模式。
|
||||
使用批量模式,要指定 `query` 参数,或者发送数据到 `stdin`(它会检查 `stdin` 是否是 Terminal),或者两种同时使用。
|
||||
它与 HTTP 接口很相似,当使用 `query` 参数发送数据到 `stdin` 时,客户端请求就是一行一行的 `stdin` 输入作为 `query` 的参数。这种方式在大规模的插入请求中非常方便。
|
||||
## 使用方式 {#cli_usage}
|
||||
|
||||
使用这个客户端插入数据的示例:
|
||||
客户端可以在交互和非交互(批处理)模式下使用。要使用批处理模式,请指定`query`参数,或将数据发送到`stdin`(它会验证`stdin`是否是终端),或两者同时进行。与HTTP接口类似,当使用`query`参数并向`stdin`发送数据时,客户端请求就是一行一行的`stdin`输入作为`query`的参数。这种方式在大规模的插入请求中非常方便。
|
||||
|
||||
使用客户端插入数据的示例:
|
||||
|
||||
``` bash
|
||||
echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
|
||||
cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
3, 'some text', '2016-08-14 00:00:00'
|
||||
4, 'some more text', '2016-08-14 00:00:01'
|
||||
_EOF
|
||||
|
||||
cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
|
||||
```
|
||||
|
||||
在批量模式中,默认的数据格式是 `TabSeparated` 分隔的。您可以根据查询来灵活设置 FORMAT 格式。
|
||||
在批量模式中,默认的数据格式是`TabSeparated`分隔的。您可以根据查询来灵活设置FORMAT格式。
|
||||
|
||||
默认情况下,在批量模式中只能执行单个查询。为了从一个 Script 中执行多个查询,可以使用 `--multiquery` 参数。除了 INSERT 请求外,这种方式在任何地方都有用。查询的结果会连续且不含分隔符地输出。
|
||||
同样的,为了执行大规模的查询,您可以为每个查询执行一次 `clickhouse-client`。但注意到每次启动 `clickhouse-client` 程序都需要消耗几十毫秒时间。
|
||||
默认情况下,在批量模式中只能执行单个查询。为了从一个Script中执行多个查询,可以使用`--multiquery`参数。除了INSERT请求外,这种方式在任何地方都有用。查询的结果会连续且不含分隔符地输出。
|
||||
同样的,为了执行大规模的查询,您可以为每个查询执行一次`clickhouse-client`。但注意到每次启动`clickhouse-client`程序都需要消耗几十毫秒时间。
|
||||
|
||||
在交互模式下,每条查询过后,你可以直接输入下一条查询命令。
|
||||
|
||||
如果 `multiline` 没有指定(默认没指定):为了执行查询,按下 Enter 即可。查询语句不是必须使用分号结尾。如果需要写一个多行的查询语句,可以在换行之前输入一个反斜杠`\`,然后在您按下 Enter 键后,您就可以输入当前语句的下一行查询了。
|
||||
如果`multiline`没有指定(默认没指定):为了执行查询,按下Enter即可。查询语句不是必须使用分号结尾。如果需要写一个多行的查询语句,可以在换行之前输入一个反斜杠`\`,然后在您按下Enter键后,您就可以输入当前语句的下一行查询了。
|
||||
|
||||
如果 `multiline` 指定了:为了执行查询,需要以分号结尾并且按下 Enter 键。如果行末没有分号,将认为当前语句并没有输入完而要求继续输入下一行。
|
||||
如果指定了`multiline`:为了执行查询,需要以分号结尾并且按下Enter键。如果行末没有分号,将认为当前语句并没有输入完而要求继续输入下一行。
|
||||
|
||||
若只运行单个查询,分号后面的所有内容都会被忽略。
|
||||
|
||||
您可以指定 `\G` 来替代分号或者在分号后面,这表示 `Vertical` 的格式。在这种格式下,每一个值都会打印在不同的行中,这种方式对于宽表来说很方便。这个不常见的特性是为了兼容 MySQL 命令而加的。
|
||||
您可以指定`\G`来替代分号或者在分号后面,这表示使用`Vertical`的格式。在这种格式下,每一个值都会打印在不同的行中,这种方式对于宽表来说很方便。这个不常见的特性是为了兼容MySQL命令而加的。
|
||||
|
||||
命令行客户端是基于 `replxx`。换句话说,它可以使用我们熟悉的快捷键方式来操作以及保留历史命令。
|
||||
历史命令会写入在 `~/.clickhouse-client-history` 中。
|
||||
命令行客户端是基于`replxx`(类似于`readline`)。换句话说,它可以使用我们熟悉的快捷键方式来操作以及保留历史命令。
|
||||
历史命令会写入在`~/.clickhouse-client-history`中。
|
||||
|
||||
默认情况下,输出的格式是 `PrettyCompact`。您可以通过 FORMAT 设置根据不同查询来修改格式,或者通过在查询末尾指定 `\G` 字符,或通过在命令行中使用 `--format` 或 `--vertical` 参数,或使用客户端的配置文件。
|
||||
默认情况下,输出的格式是`PrettyCompact`。您可以通过FORMAT设置根据不同查询来修改格式,或者通过在查询末尾指定`\G`字符,或通过在命令行中使用`--format`或`--vertical`参数,或使用客户端的配置文件。
|
||||
|
||||
若要退出客户端,使用 Ctrl+D (或 Ctrl+C),或者输入以下其中一个命令:`exit`, `quit`, `logout`, `учше`, `йгше`, `дщпщге`, `exit;`, `quit;`, `logout;`, `учшеж`, `йгшеж`, `дщпщгеж`, `q`, `й`, `q`, `Q`, `:q`, `й`, `Й`, `Жй`
|
||||
若要退出客户端,使用Ctrl+D(或Ctrl+C),或者输入以下其中一个命令:`exit`, `quit`, `logout`, `учше`, `йгше`, `дщпщге`, `exit;`, `quit;`, `logout;`, `q`, `Q`, `:q`
|
||||
|
||||
当执行一个查询的时候,客户端会显示:
|
||||
|
||||
1. 进度, 进度会每秒更新十次 (默认情况下)。 对于很快的查询,进度可能没有时间显示。
|
||||
1. 进度, 进度会每秒更新十次(默认情况下)。对于很快的查询,进度可能没有时间显示。
|
||||
2. 为了调试会显示解析且格式化后的查询语句。
|
||||
3. 指定格式的输出结果。
|
||||
4. 输出结果的行数,经过的时间,以及查询处理的速度。
|
||||
|
||||
您可以通过 Ctrl+C 来取消一个长时间的查询。然而,您依然需要等待服务端来中止请求。在某个阶段去取消查询是不可能的。如果您不等待并再次按下 Ctrl + C,客户端将会退出。
|
||||
您可以通过Ctrl+C来取消一个长时间的查询。然而,您依然需要等待服务端来中止请求。在某个阶段去取消查询是不可能的。如果您不等待并再次按下Ctrl + C,客户端将会退出。
|
||||
|
||||
命令行客户端允许通过外部数据 (外部临时表) 来查询。更多相关信息,请参考 «[外部数据查询处理](../engines/table-engines/special/external-data.md)».
|
||||
命令行客户端允许通过外部数据(外部临时表)来查询。更多相关信息,请参考 «[外部数据查询处理](../engines/table-engines/special/external-data.md)».
|
||||
|
||||
### 查询参数 {#cli-queries-with-parameters}
|
||||
|
||||
您可以创建带有参数的查询,并将值从客户端传递给服务器。这允许避免在客户端使用特定的动态值格式化查询。例如:
|
||||
|
||||
``` bash
|
||||
$ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}"
|
||||
```
|
||||
|
||||
#### 查询语法 {#cli-queries-with-parameters-syntax}
|
||||
|
||||
像平常一样格式化一个查询,然后把你想要从app参数传递到查询的值用大括号格式化,格式如下:
|
||||
|
||||
``` sql
|
||||
{<name>:<data type>}
|
||||
```
|
||||
|
||||
- `name` — 占位符标识符。在控制台客户端,使用`--param_<name> = value`来指定
|
||||
- `data type` — [数据类型](../sql-reference/data-types/index.md)参数值。例如,一个数据结构`(integer, ('string', integer))`拥有`Tuple(UInt8, Tuple(String, UInt8))`数据类型(你也可以用另一个[integer](../sql-reference/data-types/int-uint.md)类型)。
|
||||
|
||||
#### 示例 {#example}
|
||||
|
||||
``` bash
|
||||
$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}"
|
||||
```
|
||||
|
||||
## 配置 {#interfaces_cli_configuration}
|
||||
|
||||
您可以通过以下方式传入参数到 `clickhouse-client` 中 (所有的参数都有默认值):
|
||||
您可以通过以下方式传入参数到`clickhouse-client`中(所有的参数都有默认值):
|
||||
|
||||
- 通过命令行
|
||||
|
||||
@ -75,29 +109,32 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA
|
||||
|
||||
配置文件的配置会覆盖默认值
|
||||
|
||||
### 命令行参数 {#ming-ling-xing-can-shu}
|
||||
### 命令行参数 {#command-line-options}
|
||||
|
||||
- `--host, -h` -– 服务端的 host 名称, 默认是 ‘localhost’。 您可以选择使用 host 名称或者 IPv4 或 IPv6 地址。
|
||||
- `--port` – 连接的端口,默认值: 9000。注意 HTTP 接口以及 TCP 原生接口是使用不同端口的。
|
||||
- `--user, -u` – 用户名。 默认值: default。
|
||||
- `--password` – 密码。 默认值: 空字符串。
|
||||
- `--query, -q` – 非交互模式下的查询语句.
|
||||
- `--database, -d` – 默认当前操作的数据库. 默认值: 服务端默认的配置 (默认是 `default`)。
|
||||
- `--multiline, -m` – 如果指定,允许多行语句查询(Enter 仅代表换行,不代表查询语句完结)。
|
||||
- `--multiquery, -n` – 如果指定, 允许处理用逗号分隔的多个查询,只在非交互模式下生效。
|
||||
- `--host, -h` – 服务端的host名称, 默认是`localhost`。您可以选择使用host名称或者IPv4或IPv6地址。
|
||||
- `--port` – 连接的端口,默认值:9000。注意HTTP接口以及TCP原生接口使用的是不同端口。
|
||||
- `--user, -u` – 用户名。 默认值:`default`。
|
||||
- `--password` – 密码。 默认值:空字符串。
|
||||
- `--query, -q` – 使用非交互模式查询。
|
||||
- `--database, -d` – 默认当前操作的数据库. 默认值:服务端默认的配置(默认是`default`)。
|
||||
- `--multiline, -m` – 如果指定,允许多行语句查询(Enter仅代表换行,不代表查询语句完结)。
|
||||
- `--multiquery, -n` – 如果指定, 允许处理用`;`号分隔的多个查询,只在非交互模式下生效。
|
||||
- `--format, -f` – 使用指定的默认格式输出结果。
|
||||
- `--vertical, -E` – 如果指定,默认情况下使用垂直格式输出结果。这与 ‘–format=Vertical’ 相同。在这种格式中,每个值都在单独的行上打印,这种方式对显示宽表很有帮助。
|
||||
- `--time, -t` – 如果指定,非交互模式下会打印查询执行的时间到 ‘stderr’ 中。
|
||||
- `--vertical, -E` – 如果指定,默认情况下使用垂直格式输出结果。这与`--format=Vertical`相同。在这种格式中,每个值都在单独的行上打印,这种方式对显示宽表很有帮助。
|
||||
- `--time, -t` – 如果指定,非交互模式下会打印查询执行的时间到`stderr`中。
|
||||
- `--stacktrace` – 如果指定,如果出现异常,会打印堆栈跟踪信息。
|
||||
- `--config-file` – 配置文件的名称。
|
||||
- `--secure` – 如果指定,将通过安全连接连接到服务器。
|
||||
- `--history_file` — 存放命令历史的文件的路径。
|
||||
- `--param_<name>` — 查询参数配置[查询参数](#cli-queries-with-parameters).
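下面的命令结合了上面列出的多个参数(示意性例子,其中的主机、数据库等取值仅作演示):

``` bash
$ clickhouse-client --host localhost --port 9000 --user default --database default --format TabSeparated --time --query "SELECT version()"
```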
|
||||
|
||||
### 配置文件 {#pei-zhi-wen-jian}
|
||||
### 配置文件 {#configuration_files}
|
||||
|
||||
`clickhouse-client` 使用一下第一个存在的文件:
|
||||
`clickhouse-client`使用以下第一个配置文件:
|
||||
|
||||
- 通过 `--config-file` 参数指定的文件.
|
||||
- 通过`--config-file`参数指定。
|
||||
- `./clickhouse-client.xml`
|
||||
- `\~/.clickhouse-client/config.xml`
|
||||
- `~/.clickhouse-client/config.xml`
|
||||
- `/etc/clickhouse-client/config.xml`
|
||||
|
||||
配置文件示例:
|
||||
@ -106,6 +143,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA
|
||||
<config>
|
||||
<user>username</user>
|
||||
<password>password</password>
|
||||
<secure>False</secure>
|
||||
</config>
|
||||
```
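使用`--config-file`指定配置文件的示意性例子(文件路径仅作演示,可替换为上面列出的任意位置):

``` bash
$ clickhouse-client --config-file=/etc/clickhouse-client/config.xml --query "SELECT 1"
```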
|
||||
|
||||
|
@ -1,5 +1,10 @@
|
||||
# C ++客户端库 {#c-ke-hu-duan-ku}
|
||||
---
|
||||
toc_priority: 24
|
||||
toc_title: C++客户端库
|
||||
---
|
||||
|
||||
请参阅以下网站的自述文件[ツ暗ェツ氾环催ツ団](https://github.com/ClickHouse/clickhouse-cpp)资料库。
|
||||
# C++客户端库 {#c-client-library}
|
||||
|
||||
请参考仓库的描述文件[clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp)。
|
||||
|
||||
[原始文章](https://clickhouse.tech/docs/zh/interfaces/cpp/) <!--hide-->
|
||||
|
@ -1,23 +1,31 @@
|
||||
# HTTP 客户端 {#http-ke-hu-duan}
|
||||
---
|
||||
toc_priority: 19
|
||||
toc_title: HTTP客户端
|
||||
---
|
||||
|
||||
HTTP 接口可以让你通过任何平台和编程语言来使用 ClickHouse。我们用 Java 和 Perl 以及 shell 脚本来访问它。在其他的部门中,HTTP 接口会用在 Perl,Python 以及 Go 中。HTTP 接口比 TCP 原生接口更为局限,但是却有更好的兼容性。
|
||||
# HTTP客户端 {#http-interface}
|
||||
|
||||
默认情况下,clickhouse-server 会在端口 8123 上监控 HTTP 请求(这可以在配置中修改)。
|
||||
如果你发送了一个不带参数的 GET 请求,它会返回一个字符串 «Ok.»(结尾有换行)。可以将它用在健康检查脚本中。
|
||||
HTTP接口允许您在任何平台上使用任何编程语言访问ClickHouse。我们在Java、Perl以及shell脚本中使用它。在其他部门中,HTTP接口也被用于Perl、Python和Go。HTTP接口比原生接口受到更多的限制,但具有更好的兼容性。
|
||||
|
||||
默认情况下,`clickhouse-server`会在`8123`端口上监控HTTP请求(这可以在配置中修改)。
|
||||
|
||||
如果你发送了一个未携带任何参数的`GET /`请求,它会返回一个字符串 «Ok.»(结尾有换行)。可以将它用在健康检查脚本中。
|
||||
|
||||
如果你发送了一个未携带任何参数的`GET /`请求,它会返回响应码200以及在[Http服务响应配置](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response)中定义的字符串,默认为 «Ok.»(结尾有换行)。
|
||||
|
||||
``` bash
|
||||
$ curl 'http://localhost:8123/'
|
||||
Ok.
|
||||
```
|
||||
|
||||
通过 URL 中的 `query` 参数来发送请求,或者发送 POST 请求,或者将查询的开头部分放在 URL 的 `query` 参数中,其他部分放在 POST 中(我们会在后面解释为什么这样做是有必要的)。URL 的大小会限制在 16 KB,所以发送大型查询时要时刻记住这点。
|
||||
通过URL中的 `query` 参数来发送请求,或者发送POST请求,或者将查询的开头部分放在URL的`query`参数中,其他部分放在POST中(我们会在后面解释为什么这样做是有必要的)。URL的大小会限制在16KB,所以发送大型查询时要时刻记住这点。
|
||||
|
||||
如果请求成功,将会收到 200 的响应状态码和响应主体中的结果。
|
||||
如果发生了某个异常,将会收到 500 的响应状态码和响应主体中的异常描述信息。
|
||||
如果请求成功,将会收到200的响应状态码和响应主体中的结果。
|
||||
如果发生了某个异常,将会收到500的响应状态码和响应主体中的异常描述信息。
|
||||
|
||||
当使用 GET 方法请求时,`readonly` 会被设置。换句话说,若要作修改数据的查询,只能发送 POST 方法的请求。可以将查询通过 POST 主体发送,也可以通过 URL 参数发送。
|
||||
当使用GET方法请求时,`readonly`会被设置。换句话说,若要作修改数据的查询,只能发送POST方法的请求。可以将查询通过POST主体发送,也可以通过URL参数发送。
|
||||
|
||||
例:
|
||||
示例:
|
||||
|
||||
``` bash
|
||||
$ curl 'http://localhost:8123/?query=SELECT%201'
|
||||
@ -26,13 +34,14 @@ $ curl 'http://localhost:8123/?query=SELECT%201'
|
||||
$ wget -nv -O- 'http://localhost:8123/?query=SELECT 1'
|
||||
1
|
||||
|
||||
$ GET 'http://localhost:8123/?query=SELECT 1'
|
||||
1
|
||||
|
||||
$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
|
||||
HTTP/1.0 200 OK
|
||||
Date: Wed, 27 Nov 2019 10:30:18 GMT
|
||||
Connection: Close
|
||||
Date: Fri, 16 Nov 2012 19:21:50 GMT
|
||||
Content-Type: text/tab-separated-values; charset=UTF-8
|
||||
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
|
||||
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
|
||||
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
|
||||
1
|
||||
```
|
||||
@ -50,7 +59,22 @@ $ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @-
|
||||
1
|
||||
```
|
||||
|
||||
如果一部分请求是通过参数发送的,另外一部分通过 POST 主体发送,两部分查询之间会一行空行插入。
|
||||
如您所见,curl有些不方便,因为空格必须进行URL转义。
|
||||
尽管wget本身会对所有内容进行转义,但我们不推荐使用它,因为在使用keepalive和传输编码chunked时,它在HTTP 1.1上不能很好地工作。
|
||||
|
||||
``` bash
|
||||
$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @-
|
||||
1
|
||||
|
||||
$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @-
|
||||
1
|
||||
|
||||
$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @-
|
||||
1
|
||||
```
|
||||
|
||||
如果部分查询是在参数中发送的,部分是在POST中发送的,则在这两个数据部分之间插入换行。
|
||||
|
||||
错误示例:
|
||||
|
||||
``` bash
|
||||
@ -60,8 +84,11 @@ ECT 1
|
||||
, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception
|
||||
```
|
||||
|
||||
默认情况下,返回的数据是 TabSeparated 格式的,更多信息,见 «\[数据格式\]» 部分。
|
||||
可以使用 FORMAT 设置查询来请求不同格式。
|
||||
默认情况下,返回的数据是`TabSeparated`格式的,更多信息,见[Formats](../interfaces/formats/)部分。
|
||||
|
||||
您可以使用查询的FORMAT子句来设置其他格式。
|
||||
|
||||
另外,还可以使用`default_format`URL参数或`X-ClickHouse-Format`头来指定TabSeparated之外的默认格式。
|
||||
|
||||
``` bash
|
||||
$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @-
|
||||
@ -72,42 +99,42 @@ $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @
|
||||
└───┘
|
||||
```
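下面是使用`default_format`URL参数和`X-ClickHouse-Format`头指定默认输出格式的示意性例子(输出内容仅作演示):

``` bash
$ echo 'SELECT 1' | curl 'http://localhost:8123/?default_format=JSONEachRow' --data-binary @-
{"1":1}

$ echo 'SELECT 1' | curl -H 'X-ClickHouse-Format: Pretty' 'http://localhost:8123/' --data-binary @-
```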
|
||||
|
||||
INSERT 必须通过 POST 方法来插入数据。这种情况下,你可以将查询的开头部分放在 URL 参数中,然后用 POST 主体传入插入的数据。插入的数据可以是,举个例子,从 MySQL 导出的以 tab 分割的数据。在这种方式中,INSERT 查询取代了 LOAD DATA LOCAL INFILE from MySQL。
|
||||
INSERT必须通过POST方法来插入数据。在这种情况下,您可以在URL参数中编写查询的开始部分,并使用POST传递要插入的数据。例如,要插入的数据可以是来自MySQL的一个以tab分隔的存储。通过这种方式,INSERT查询替换了从MySQL查询的LOAD DATA LOCAL INFILE。
|
||||
|
||||
示例: 创建一个表:
|
||||
|
||||
``` bash
|
||||
echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | POST 'http://localhost:8123/'
|
||||
$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @-
|
||||
```
|
||||
|
||||
使用类似 INSERT 的查询来插入数据:
|
||||
使用类似INSERT的查询来插入数据:
|
||||
|
||||
``` bash
|
||||
echo 'INSERT INTO t VALUES (1),(2),(3)' | POST 'http://localhost:8123/'
|
||||
$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @-
|
||||
```
|
||||
|
||||
数据可以从查询中单独发送:
|
||||
|
||||
``` bash
|
||||
echo '(4),(5),(6)' | POST 'http://localhost:8123/?query=INSERT INTO t VALUES'
|
||||
$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
|
||||
```
|
||||
|
||||
可以指定任何数据格式。值的格式和写入表 `t` 的值的格式相同:
|
||||
您可以指定任何数据格式。`Values`格式与将INSERT写入`t`值时使用的格式相同:
|
||||
|
||||
``` bash
|
||||
echo '(7),(8),(9)' | POST 'http://localhost:8123/?query=INSERT INTO t FORMAT Values'
|
||||
$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @-
|
||||
```
|
||||
|
||||
若要插入 tab 分割的数据,需要指定对应的格式:
|
||||
若要插入tab分割的数据,需要指定对应的格式:
|
||||
|
||||
``` bash
|
||||
echo -ne '10\n11\n12\n' | POST 'http://localhost:8123/?query=INSERT INTO t FORMAT TabSeparated'
|
||||
$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @-
|
||||
```
|
||||
|
||||
从表中读取内容。由于查询处理是并行的,数据以随机顺序输出。
|
||||
|
||||
``` bash
|
||||
$ GET 'http://localhost:8123/?query=SELECT a FROM t'
|
||||
$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t'
|
||||
7
|
||||
8
|
||||
9
|
||||
@ -122,22 +149,37 @@ $ GET 'http://localhost:8123/?query=SELECT a FROM t'
|
||||
6
|
||||
```
|
||||
|
||||
删除表。
|
||||
删除表:
|
||||
|
||||
``` bash
|
||||
POST 'http://localhost:8123/?query=DROP TABLE t'
|
||||
$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @-
|
||||
```
|
||||
|
||||
成功请求后并不会返回数据,返回一个空的响应体。
|
||||
|
||||
可以通过压缩来传输数据。压缩的数据没有一个标准的格式,但你需要指定一个压缩程序来使用它(sudo apt-get install compressor-metrika-yandex)。
|
||||
在传输数据时,可以使用ClickHouse内部压缩格式。压缩的数据具有非标准格式,您需要使用特殊的`clickhouse-compressor`程序来处理它(它是与`clickhouse-client`包一起安装的)。为了提高数据插入的效率,您可以通过使用[http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress)设置禁用服务器端校验。
|
||||
|
||||
如果在 URL 中指定了 `compress=1` ,服务会返回压缩的数据。
|
||||
如果在 URL 中指定了 `decompress=1` ,服务会解压通过 POST 方法发送的数据。
|
||||
如果在URL中指定了`compress=1`,服务会返回压缩的数据。
|
||||
如果在URL中指定了`decompress=1`,服务会解压通过POST方法发送的数据。
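下面是一个示意性的例子,假设已安装`clickhouse-compressor`,用它压缩查询文本后通过`decompress=1`发送给服务端:

``` bash
$ echo 'SELECT 1' | clickhouse-compressor | curl 'http://localhost:8123/?decompress=1' --data-binary @-
```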
|
||||
|
||||
可以通过为每份数据进行立即压缩来减少大规模数据传输中的网络压力。
|
||||
您也可以选择使用[HTTP compression](https://en.wikipedia.org/wiki/HTTP_compression)。发送一个压缩的POST请求,附加请求头`Content-Encoding: compression_method`。为了使ClickHouse响应,您必须附加`Accept-Encoding: compression_method`。ClickHouse支持`gzip`,`br`和`deflate` [compression methods](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens)。要启用HTTP压缩,必须使用ClickHouse[启用Http压缩](../operations/settings/settings.md#settings-enable_http_compression)配置。您可以在[Http zlib压缩级别](#settings-http_zlib_compression_level)设置中为所有压缩方法配置数据压缩级别。
|
||||
|
||||
可以指定 ‘database’ 参数来指定默认的数据库。
|
||||
您可以使用它在传输大量数据时减少网络流量,或者创建立即压缩的转储。
|
||||
|
||||
通过压缩发送数据的例子:
|
||||
|
||||
``` bash
|
||||
#Sending data to the server:
|
||||
$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip'
|
||||
|
||||
#Sending data to the client:
|
||||
$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/'
|
||||
```
|
||||
|
||||
!!! note "警告"
|
||||
一些HTTP客户端可能会在默认情况下从服务器解压数据(使用`gzip`和`deflate`),即使您未正确地使用了压缩设置,您也可能会得到解压数据。
|
||||
|
||||
您可以使用`database`URL参数或`X-ClickHouse-Database`头来指定默认数据库。
|
||||
|
||||
``` bash
|
||||
$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @-
|
||||
@ -153,30 +195,38 @@ $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?data
|
||||
9
|
||||
```
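使用`X-ClickHouse-Database`头指定默认数据库的示意性例子:

``` bash
$ echo 'SELECT currentDatabase()' | curl -H 'X-ClickHouse-Database: system' 'http://localhost:8123/' --data-binary @-
system
```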
|
||||
|
||||
默认情况下,默认数据库会在服务的配置中注册,默认是 `default`。或者,也可以在表名之前使用一个点来指定数据库。
|
||||
默认情况下,在服务器设置中注册的数据库被用作默认数据库。默认情况下,它是名为`default`的数据库。或者,您可以始终在表名之前使用点来指定数据库。
|
||||
|
||||
用户名密码可以通过以下两种方式指定:
|
||||
用户名和密码可以通过以下三种方式指定:
|
||||
|
||||
1. 通过 HTTP Basic Authentication。示例:
|
||||
1. 通过HTTP Basic Authentication。示例:
|
||||
|
||||
<!-- -->
|
||||
|
||||
``` bash
|
||||
echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-
|
||||
$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-
|
||||
```
|
||||
|
||||
1. 通过 URL 参数 中的 ‘user’ 和 ‘password’。示例:
|
||||
1. 通过URL参数中的`user`和`password`。示例:
|
||||
|
||||
<!-- -->
|
||||
|
||||
``` bash
|
||||
echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @-
|
||||
$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @-
|
||||
```
|
||||
|
||||
如果用户名没有指定,默认的用户是 `default`。如果密码没有指定,默认会使用空密码。
|
||||
可以使用 URL 参数指定配置或者设置整个配置文件来处理单个查询。示例:`http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1`
|
||||
1. 使用`X-ClickHouse-User`和`X-ClickHouse-Key`头指定,示例:
|
||||
|
||||
更多信息,参见 «[设置](../operations/settings/index.md#settings)» 部分。
|
||||
<!-- -->
|
||||
|
||||
``` bash
|
||||
$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @-
|
||||
```
|
||||
|
||||
如果未指定用户名,则使用`default`。如果未指定密码,则使用空密码。
|
||||
您还可以使用URL参数来指定处理单个查询或整个设置配置文件的任何设置。例子:`http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1`
|
||||
|
||||
更多信息,详见[设置](../operations/settings/index.md#settings)部分。
|
||||
|
||||
``` bash
|
||||
$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @-
|
||||
@ -192,30 +242,386 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812
|
||||
9
|
||||
```
|
||||
|
||||
更多关于其他参数的信息,参见 «[设置](../operations/settings/index.md#settings)» 部分。
|
||||
有关其他参数的信息,请参考`SET`一节。
|
||||
|
||||
相比起 TCP 原生接口,HTTP 接口不支持会话和会话设置的概念,不允许中止查询(准确地说,只在少数情况下允许),不显示查询处理的进展。执行解析和数据格式化都是在服务端处理,网络上会比 TCP 原生接口更低效。
|
||||
类似地,您可以在HTTP协议中使用ClickHouse会话。为此,需要向请求添加`session_id`GET参数。您可以使用任何字符串作为会话ID。默认情况下,会话在60秒不活动后终止。要更改此超时配置,请修改服务器配置中的`default_session_timeout`设置,或向请求添加`session_timeout`GET参数。要检查会话状态,使用`session_check=1`参数。一次只能在单个会话中执行一个查询。
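下面是一个使用会话的示意性例子(`session_id`的取值可以是任意字符串,临时表只在同一会话内可见):

``` bash
$ curl 'http://localhost:8123/?session_id=my_session_1' --data-binary 'CREATE TEMPORARY TABLE tmp (a UInt8)'
$ curl 'http://localhost:8123/?session_id=my_session_1' --data-binary 'INSERT INTO tmp VALUES (1)'
$ curl 'http://localhost:8123/?session_id=my_session_1' --data-binary 'SELECT * FROM tmp'
1
```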
|
||||
|
||||
可选的 `query_id` 参数可能当做 query ID 传入(或者任何字符串)。更多信息,参见 «[设置 replace_running_query](../operations/settings/settings.md)» 部分。
|
||||
您可以在`X-ClickHouse-Progress`响应头中收到查询进度的信息。为此,启用[Http Header携带进度](../operations/settings/settings.md#settings-send_progress_in_http_headers)。示例:
|
||||
|
||||
可选的 `quota_key` 参数可能当做 quota key 传入(或者任何字符串)。更多信息,参见 «[配额](../operations/quotas.md#quotas)» 部分。
|
||||
``` text
|
||||
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"}
|
||||
X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"}
|
||||
X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"}
|
||||
```
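产生上述进度头的一种调用方式(示意性例子,`-v`用于在stderr中查看响应头):

``` bash
$ curl -vsS 'http://localhost:8123/?send_progress_in_http_headers=1' --data-binary 'SELECT sum(number) FROM numbers(10000000)' > /dev/null
```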
|
||||
|
||||
HTTP 接口允许传入额外的数据(外部临时表)来查询。更多信息,参见 «[外部数据查询处理](../engines/table-engines/special/external-data.md)» 部分。
|
||||
显示字段信息:
|
||||
|
||||
## 响应缓冲 {#xiang-ying-huan-chong}
|
||||
- `read_rows` — 读取的行数。
|
||||
- `read_bytes` — 读取的数据字节数。
|
||||
- `total_rows_to_read` — 读取的数据总行数。
|
||||
- `written_rows` — 写入数据行数。
|
||||
- `written_bytes` — 写入数据字节数。
|
||||
|
||||
可以在服务器端启用响应缓冲。提供了 `buffer_size` 和 `wait_end_of_query` 两个URL 参数来达此目的。
|
||||
如果HTTP连接丢失,正在运行的请求不会自动停止。解析和数据格式化都是在服务器端执行的,因此通过HTTP使用网络传输的效率可能不高。
|
||||
|
||||
`buffer_size` 决定了查询结果要在服务内存中缓冲多少个字节数据. 如果响应体比这个阈值大,缓冲区会写入到 HTTP 管道,剩下的数据也直接发到 HTTP 管道中。
|
||||
可选的`query_id`参数可能当做query ID传入(或者任何字符串)。更多信息,详见[replace_running_query](../operations/settings/settings.md)部分。
|
||||
|
||||
为了确保整个响应体被缓冲,可以设置 `wait_end_of_query=1`。这种情况下,存入内存的数据会被缓冲到服务端的一个临时文件中。
|
||||
可选的`quota_key`参数可能当做quota key传入(或者任何字符串)。更多信息,详见[Quotas](../operations/quotas.md#quotas)部分。
|
||||
|
||||
HTTP接口允许传入额外的数据(外部临时表)来查询。更多信息,详见[外部数据查询处理](../engines/table-engines/special/external-data.md)部分。
|
||||
|
||||
## 响应缓冲 {#response-buffering}
|
||||
|
||||
可以在服务器端启用响应缓冲。提供了`buffer_size`和`wait_end_of_query`两个URL参数来达此目的。
|
||||
|
||||
`buffer_size`决定了查询结果要在服务器内存中缓冲多少个字节的数据。如果响应体比这个阈值大,缓冲区会写入到HTTP管道,剩下的数据也直接发到HTTP管道中。
|
||||
|
||||
为了确保整个响应体被缓冲,可以设置`wait_end_of_query=1`。这种情况下,存入内存的数据会被缓冲到服务端的一个临时文件中。
|
||||
|
||||
示例:
|
||||
|
||||
``` bash
|
||||
curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
|
||||
$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
|
||||
```
|
||||
|
||||
查询请求响应状态码和 HTTP 头被发送到客户端后,若发生查询处理出错,使用缓冲区可以避免这种情况的发生。在这种情况下,响应主体的结尾会写入一条错误消息,而在客户端,只能在解析阶段检测到该错误。
|
||||
查询请求响应状态码和HTTP头被发送到客户端后,若发生查询处理出错,使用缓冲区可以避免这种情况的发生。在这种情况下,响应主体的结尾会写入一条错误消息,而在客户端,只能在解析阶段检测到该错误。
|
||||
|
||||
### 查询参数 {#cli-queries-with-parameters}
|
||||
|
||||
您可以使用参数创建查询,并通过相应的HTTP请求参数为它们传递值。有关更多信息,请参见[CLI查询参数](../interfaces/cli.md#cli-queries-with-parameters)。
|
||||
|
||||
### 示例 {#example}
|
||||
|
||||
``` bash
|
||||
$ curl -sS "<address>?param_id=2¶m_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
|
||||
```
|
||||
|
||||
## 特定的HTTP接口 {#predefined_http_interface}
|
||||
|
||||
ClickHouse通过HTTP接口支持特定的查询。例如,您可以如下所示向表写入数据:
|
||||
|
||||
``` bash
|
||||
$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
|
||||
```
|
||||
|
||||
ClickHouse还支持预定义的HTTP接口,可以帮助您更容易与第三方工具集成,如[Prometheus Exporter](https://github.com/percona-lab/clickhouse_exporter).
|
||||
|
||||
示例:
|
||||
|
||||
- 首先,将此部分添加到服务器配置文件中:
|
||||
|
||||
<!-- -->
|
||||
|
||||
``` xml
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<url>/predefined_query</url>
|
||||
<methods>POST,GET</methods>
|
||||
<handler>
|
||||
<type>predefined_query_handler</type>
|
||||
<query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
|
||||
</handler>
|
||||
</rule>
|
||||
<rule>...</rule>
|
||||
<rule>...</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
- 请求Prometheus格式的URL以获取数据:
|
||||
|
||||
<!-- -->
|
||||
|
||||
``` bash
|
||||
$ curl -v 'http://localhost:8123/predefined_query'
|
||||
* Trying ::1...
|
||||
* Connected to localhost (::1) port 8123 (#0)
|
||||
> GET /predefined_query HTTP/1.1
|
||||
> Host: localhost:8123
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Tue, 28 Apr 2020 08:52:56 GMT
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/plain; charset=UTF-8
|
||||
< X-ClickHouse-Server-Display-Name: i-mloy5trc
|
||||
< Transfer-Encoding: chunked
|
||||
< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
|
||||
< X-ClickHouse-Format: Template
|
||||
< X-ClickHouse-Timezone: Asia/Shanghai
|
||||
< Keep-Alive: timeout=3
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
# HELP "Query" "Number of executing queries"
|
||||
# TYPE "Query" counter
|
||||
"Query" 1
|
||||
|
||||
# HELP "Merge" "Number of executing background merges"
|
||||
# TYPE "Merge" counter
|
||||
"Merge" 0
|
||||
|
||||
# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
|
||||
# TYPE "PartMutation" counter
|
||||
"PartMutation" 0
|
||||
|
||||
# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
|
||||
# TYPE "ReplicatedFetch" counter
|
||||
"ReplicatedFetch" 0
|
||||
|
||||
# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
|
||||
# TYPE "ReplicatedSend" counter
|
||||
"ReplicatedSend" 0
|
||||
|
||||
* Connection #0 to host localhost left intact
|
||||
|
||||
* Connection #0 to host localhost left intact
|
||||
```
|
||||
|
||||
正如您从示例中看到的,`http_handlers`是在`config.xml`文件中配置的,并且可以包含许多`rule`。ClickHouse将把接收到的HTTP请求与`rule`中的预定义类型进行匹配,第一个匹配成功的`rule`将运行对应的处理程序。如果匹配成功,ClickHouse将执行相应的预定义查询。
|
||||
|
||||
现在`rule`可以配置`method`, `header`, `url`, `handler`:
|
||||
- `method` 负责匹配HTTP请求的方法部分。 `method`完全符合HTTP协议中[method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods)的定义。这是一个可选的配置。如果它没有在配置文件中定义,那么它与HTTP请求的方法部分不匹配。
|
||||
|
||||
- `url` 负责匹配HTTP请求的URL部分。它匹配[RE2](https://github.com/google/re2)正则表达式。这是一个可选的配置。如果配置文件中没有定义它,则它与HTTP请求的URL部分不匹配。
|
||||
|
||||
- `headers` 负责匹配HTTP请求的头部分。它与RE2的正则表达式兼容。这是一个可选的配置。如果它没有在配置文件中定义,那么它与HTTP请求的头部分不匹配。
|
||||
|
||||
- `handler` 包含主要的处理部分。现在`handler`可以配置`type`, `status`, `content_type`, `response_content`, `query`, `query_param_name`。
|
||||
`type` 目前支持三种类型:[特定查询](#predefined_query_handler), [动态查询](#dynamic_query_handler), [static](#static).
|
||||
|
||||
- `query` — 使用`predefined_query_handler`类型,在调用处理程序时执行查询。
|
||||
|
||||
- `query_param_name` — 与`dynamic_query_handler`类型一起使用,提取并执行HTTP请求参数中与`query_param_name`值对应的值。
|
||||
|
||||
- `status` — 与`static`类型一起使用,响应状态代码。
|
||||
|
||||
- `content_type` — 与`static`类型一起使用,响应信息[content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type)。
|
||||
|
||||
- `response_content` — 与`static`类型一起使用,响应发送给客户端的内容,当使用前缀`file://`或`config://`时,从发送给客户端的文件或配置中查找内容。
|
||||
|
||||
接下来是不同`type`的配置方法。
|
||||
|
||||
### 特定查询 {#predefined_query_handler}
|
||||
|
||||
`predefined_query_handler` 支持设置`Settings`和`query_params`参数。您可以将`query`配置为`predefined_query_handler`类型。
|
||||
|
||||
`query` 是一个预定义的`predefined_query_handler`查询,它由ClickHouse在匹配HTTP请求并返回查询结果时执行。这是一个必须的配置。
|
||||
|
||||
以下示例定义了[max_threads](../operations/settings/settings.md#settings-max_threads)和`max_alter_threads`设置,然后查询系统表以检查这些设置是否设置成功。
|
||||
|
||||
示例:
|
||||
|
||||
``` xml
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
|
||||
<method>GET</method>
|
||||
<headers>
|
||||
<XXX>TEST_HEADER_VALUE</XXX>
|
||||
<PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
|
||||
</headers>
|
||||
<handler>
|
||||
<type>predefined_query_handler</type>
|
||||
<query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
|
||||
<query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
|
||||
</handler>
|
||||
</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
``` bash
|
||||
$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
|
||||
1
|
||||
max_alter_threads 2
|
||||
```
|
||||
|
||||
!!! note "警告"
|
||||
在一个`predefined_query_handler`中,只支持insert类型的一个`查询`。
|
||||
|
||||
### 动态查询 {#dynamic_query_handler}
|
||||
|
||||
在`dynamic_query_handler`中,查询以HTTP请求参数的形式编写。区别在于,在`predefined_query_handler`中,查询是在配置文件中编写的。您可以在`dynamic_query_handler`中配置`query_param_name`。
|
||||
|
||||
ClickHouse提取并执行与HTTP请求URL中的`query_param_name`值对应的值。`query_param_name`的默认值是`/query`。这是一个可选的配置。如果配置文件中没有定义,则不会传入参数。
|
||||
|
||||
为了试验这个功能,下面的示例定义了[max_threads](../operations/settings/settings.md#settings-max_threads)和`max_alter_threads`的值,并查询这些设置是否设置成功。
|
||||
|
||||
示例:
|
||||
|
||||
``` xml
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<headers>
|
||||
<XXX>TEST_HEADER_VALUE_DYNAMIC</XXX> </headers>
|
||||
<handler>
|
||||
<type>dynamic_query_handler</type>
|
||||
<query_param_name>query_param</query_param_name>
|
||||
</handler>
|
||||
</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
``` bash
|
||||
$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' 'http://localhost:8123/own?max_threads=1&max_alter_threads=2¶m_name_1=max_threads¶m_name_2=max_alter_threads&query_param=SELECT%20name,value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D'
|
||||
max_threads 1
|
||||
max_alter_threads 2
|
||||
```
|
||||
|
||||
### static {#static}
|
||||
|
||||
`static`可以返回[content_type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type), [status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status)和`response_content`。`response_content`可以返回指定的内容。
|
||||
|
||||
示例:
|
||||
|
||||
返回信息.
|
||||
|
||||
``` xml
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<methods>GET</methods>
|
||||
<headers><XXX>xxx</XXX></headers>
|
||||
<url>/hi</url>
|
||||
<handler>
|
||||
<type>static</type>
|
||||
<status>402</status>
|
||||
<content_type>text/html; charset=UTF-8</content_type>
|
||||
<response_content>Say Hi!</response_content>
|
||||
</handler>
|
||||
</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
``` bash
|
||||
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
|
||||
* Trying ::1...
|
||||
* Connected to localhost (::1) port 8123 (#0)
|
||||
> GET /hi HTTP/1.1
|
||||
> Host: localhost:8123
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
> XXX:xxx
|
||||
>
|
||||
< HTTP/1.1 402 Payment Required
|
||||
< Date: Wed, 29 Apr 2020 03:51:26 GMT
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
Say Hi!%
|
||||
```
|
||||
|
||||
从配置中查找发送到客户端的内容。
|
||||
|
||||
``` xml
|
||||
<get_config_static_handler><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></get_config_static_handler>
|
||||
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<methods>GET</methods>
|
||||
<headers><XXX>xxx</XXX></headers>
|
||||
<url>/get_config_static_handler</url>
|
||||
<handler>
|
||||
<type>static</type>
|
||||
<response_content>config://get_config_static_handler</response_content>
|
||||
</handler>
|
||||
</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
``` bash
|
||||
$ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
|
||||
* Trying ::1...
|
||||
* Connected to localhost (::1) port 8123 (#0)
|
||||
> GET /get_config_static_handler HTTP/1.1
|
||||
> Host: localhost:8123
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
> XXX:xxx
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Wed, 29 Apr 2020 04:01:24 GMT
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/plain; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
* Connection #0 to host localhost left intact
|
||||
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
|
||||
```
|
||||
|
||||
从发送到客户端的文件中查找内容。
|
||||
|
||||
``` xml
|
||||
<http_handlers>
|
||||
<rule>
|
||||
<methods>GET</methods>
|
||||
<headers><XXX>xxx</XXX></headers>
|
||||
<url>/get_absolute_path_static_handler</url>
|
||||
<handler>
|
||||
<type>static</type>
|
||||
<content_type>text/html; charset=UTF-8</content_type>
|
||||
<response_content>file:///absolute_path_file.html</response_content>
|
||||
</handler>
|
||||
</rule>
|
||||
<rule>
|
||||
<methods>GET</methods>
|
||||
<headers><XXX>xxx</XXX></headers>
|
||||
<url>/get_relative_path_static_handler</url>
|
||||
<handler>
|
||||
<type>static</type>
|
||||
<content_type>text/html; charset=UTF-8</content_type>
|
||||
<response_content>file://./relative_path_file.html</response_content>
|
||||
</handler>
|
||||
</rule>
|
||||
</http_handlers>
|
||||
```
|
||||
|
||||
``` bash
|
||||
$ user_files_path='/var/lib/clickhouse/user_files'
|
||||
$ sudo echo "<html><body>Relative Path File</body></html>" > $user_files_path/relative_path_file.html
|
||||
$ sudo echo "<html><body>Absolute Path File</body></html>" > $user_files_path/absolute_path_file.html
|
||||
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
|
||||
* Trying ::1...
|
||||
* Connected to localhost (::1) port 8123 (#0)
|
||||
> GET /get_absolute_path_static_handler HTTP/1.1
|
||||
> Host: localhost:8123
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
> XXX:xxx
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Wed, 29 Apr 2020 04:18:16 GMT
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
<html><body>Absolute Path File</body></html>
|
||||
* Connection #0 to host localhost left intact
|
||||
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
|
||||
* Trying ::1...
|
||||
* Connected to localhost (::1) port 8123 (#0)
|
||||
> GET /get_relative_path_static_handler HTTP/1.1
|
||||
> Host: localhost:8123
|
||||
> User-Agent: curl/7.47.0
|
||||
> Accept: */*
|
||||
> XXX:xxx
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Wed, 29 Apr 2020 04:18:31 GMT
|
||||
< Connection: Keep-Alive
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Transfer-Encoding: chunked
|
||||
< Keep-Alive: timeout=3
|
||||
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
|
||||
<
|
||||
<html><body>Relative Path File</body></html>
|
||||
* Connection #0 to host localhost left intact
|
||||
```
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/http_interface/) <!--hide-->
|
||||
|
@ -1,8 +1,13 @@
|
||||
# JDBC 驱动 {#jdbc-qu-dong}
|
||||
---
|
||||
toc_priority: 22
|
||||
toc_title: JDBC驱动
|
||||
---
|
||||
|
||||
- **[官方JDBC 的驱动](https://github.com/ClickHouse/clickhouse-jdbc)**
|
||||
- 三方提供的 JDBC 驱动:
|
||||
- [掳胫–禄脢鹿脷露胫鲁隆鹿–酶](https://github.com/housepower/ClickHouse-Native-JDBC)
|
||||
# JDBC驱动 {#jdbc-driver}
|
||||
|
||||
- **[官方驱动](https://github.com/ClickHouse/clickhouse-jdbc)**
|
||||
- 第三方驱动:
|
||||
- [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
|
||||
- [clickhouse4j](https://github.com/blynkkk/clickhouse4j)
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/jdbc/) <!--hide-->
|
||||
|
@ -1,19 +1,17 @@
|
||||
---
|
||||
machine_translated: true
|
||||
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
|
||||
toc_priority: 20
|
||||
toc_title: "MySQL\u63A5\u53E3"
|
||||
toc_title: MySQL接口
|
||||
---
|
||||
|
||||
# MySQL接口 {#mysql-interface}
|
||||
|
||||
ClickHouse支持MySQL线协议。 它可以通过启用 [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) 在配置文件中设置:
|
||||
ClickHouse支持MySQL wire通讯协议。可以通过在配置文件中设置 [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) 来启用它:
|
||||
|
||||
``` xml
|
||||
<mysql_port>9004</mysql_port>
|
||||
```
|
||||
|
||||
使用命令行工具连接的示例 `mysql`:
|
||||
使用命令行工具 `mysql` 进行连接的示例:
|
||||
|
||||
``` bash
|
||||
$ mysql --protocol tcp -u default -P 9004
|
||||
@ -37,12 +35,12 @@ Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
|
||||
mysql>
|
||||
```
|
||||
|
||||
为了与所有MySQL客户端兼容,建议使用以下命令指定用户密码 [双SHA1](../operations/settings/settings-users.md#password_double_sha1_hex) 在配置文件中。
|
||||
如果使用用户密码指定 [SHA256](../operations/settings/settings-users.md#password_sha256_hex),一些客户端将无法进行身份验证(mysqljs和旧版本的命令行工具mysql)。
|
||||
为了与所有MySQL客户端兼容,建议在配置文件中使用 [double SHA1](../operations/settings/settings-users.md#password_double_sha1_hex) 来指定用户密码。
|
||||
如果使用 [SHA256](../operations/settings/settings-users.md#password_sha256_hex) 指定用户密码,一些客户端将无法进行身份验证(比如mysqljs和旧版本的命令行工具mysql)。
|
||||
|
||||
限制:
|
||||
|
||||
- 不支持准备好的查询
|
||||
- 不支持prepared queries
|
||||
|
||||
- 某些数据类型以字符串形式发送
|
||||
|
||||
|
@ -1,5 +1,10 @@
|
||||
# ODBC 驱动 {#odbc-qu-dong}
|
||||
---
|
||||
toc_priority: 23
|
||||
toc_title: ODBC驱动
|
||||
---
|
||||
|
||||
- ClickHouse官方有 ODBC 的驱动。 见 [这里](https://github.com/ClickHouse/clickhouse-odbc)。
|
||||
# ODBC驱动 {#odbc-driver}
|
||||
|
||||
- [官方驱动](https://github.com/ClickHouse/clickhouse-odbc)。
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/odbc/) <!--hide-->
|
||||
|
@ -1,5 +1,10 @@
|
||||
# 原生客户端接口(TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp}
|
||||
---
|
||||
toc_priority: 18
|
||||
toc_title: 原生接口(TCP)
|
||||
---
|
||||
|
||||
本机协议用于 [命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C ++程序。 不幸的是,本机ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码进行逆向工程 [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client))和/或拦截和分析TCP流量。
|
||||
# 原生接口(TCP){#native-interface-tcp}
|
||||
|
||||
原生接口用于[命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C++程序。可惜的是,原生的ClickHouse协议还没有正式的规范,但它可以从ClickHouse[源代码](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)通过拦截和分析TCP流量进行反向工程。
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/) <!--hide-->
|
||||
|
@ -18,7 +18,7 @@ toc_title: "\u81EA\u7701"
|
||||
|
||||
- 设置 [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) 设置为1。
|
||||
|
||||
For security reasons introspection functions are disabled by default.
|
||||
出于安全考虑,内省函数默认是关闭的。
|
||||
|
||||
ClickHouse将探查器报告保存到 [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表. 确保正确配置了表和探查器。
|
||||
|
||||
@ -36,17 +36,17 @@ addressToLine(address_of_binary_instruction)
|
||||
|
||||
**参数**
|
||||
|
||||
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process.
|
||||
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 正在运行进程的指令地址。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 源代码文件名和此文件中用冒号分隔的行号。
|
||||
- 源代码文件名和行号(以冒号分隔)。
|
||||
|
||||
For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number.
|
||||
例如,`/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`,其中`199`是行号。
|
||||
|
||||
- 二进制文件的名称,如果函数找不到调试信息。
|
||||
- 如果函数找不到调试信息,返回二进制文件的名称。
|
||||
|
||||
- 空字符串,如果地址无效。
|
||||
- 如果地址无效,返回空字符串。
|
||||
|
||||
类型: [字符串](../../sql-reference/data-types/string.md).
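下面是一个示意性的调用例子,假设已按上文启用内省函数并正确配置了`trace_log`:

``` bash
$ clickhouse-client --allow_introspection_functions=1 --query "SELECT addressToLine(arrayJoin(trace)) AS source FROM system.trace_log LIMIT 5"
```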
|
||||
|
||||
@ -132,7 +132,7 @@ addressToSymbol(address_of_binary_instruction)
|
||||
**返回值**
|
||||
|
||||
- 来自ClickHouse对象文件的符号。
|
||||
- 空字符串,如果地址无效。
|
||||
- 如果地址无效,返回空字符串。
|
||||
|
||||
类型: [字符串](../../sql-reference/data-types/string.md).
|
||||
|
||||
|
@ -60,13 +60,13 @@ public:
|
||||
const String & user_, const String & password_, const String & stage,
|
||||
bool randomize_, size_t max_iterations_, double max_time_,
|
||||
const String & json_path_, size_t confidence_,
|
||||
const String & query_id_, bool continue_on_errors_,
|
||||
const String & query_id_, const String & query_to_execute_, bool continue_on_errors_,
|
||||
bool print_stacktrace_, const Settings & settings_)
|
||||
:
|
||||
concurrency(concurrency_), delay(delay_), queue(concurrency), randomize(randomize_),
|
||||
cumulative(cumulative_), max_iterations(max_iterations_), max_time(max_time_),
|
||||
json_path(json_path_), confidence(confidence_), query_id(query_id_),
|
||||
continue_on_errors(continue_on_errors_),
|
||||
query_to_execute(query_to_execute_), continue_on_errors(continue_on_errors_),
|
||||
print_stacktrace(print_stacktrace_), settings(settings_),
|
||||
shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())),
|
||||
pool(concurrency)
|
||||
@ -150,7 +150,8 @@ private:
|
||||
double max_time;
|
||||
String json_path;
|
||||
size_t confidence;
|
||||
std::string query_id;
|
||||
String query_id;
|
||||
String query_to_execute;
|
||||
bool continue_on_errors;
|
||||
bool print_stacktrace;
|
||||
const Settings & settings;
|
||||
@ -213,20 +214,28 @@ private:
|
||||
|
||||
void readQueries()
|
||||
{
|
||||
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
||||
|
||||
while (!in.eof())
|
||||
if (query_to_execute.empty())
|
||||
{
|
||||
std::string query;
|
||||
readText(query, in);
|
||||
assertChar('\n', in);
|
||||
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
||||
|
||||
if (!query.empty())
|
||||
queries.emplace_back(query);
|
||||
while (!in.eof())
|
||||
{
|
||||
String query;
|
||||
readText(query, in);
|
||||
assertChar('\n', in);
|
||||
|
||||
if (!query.empty())
|
||||
queries.emplace_back(std::move(query));
|
||||
}
|
||||
|
||||
if (queries.empty())
|
||||
throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
|
||||
}
|
||||
else
|
||||
{
|
||||
queries.emplace_back(query_to_execute);
|
||||
}
|
||||
|
||||
if (queries.empty())
|
||||
throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
|
||||
|
||||
std::cerr << "Loaded " << queries.size() << " queries.\n";
|
||||
}
|
||||
@ -559,6 +568,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help", "produce help message")
|
||||
("query", value<std::string>()->default_value(""), "query to execute")
|
||||
("concurrency,c", value<unsigned>()->default_value(1), "number of parallel queries")
|
||||
("delay,d", value<double>()->default_value(1), "delay between intermediate reports in seconds (set 0 to disable reports)")
|
||||
("stage", value<std::string>()->default_value("complete"), "request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation")
|
||||
@ -625,6 +635,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
options["json"].as<std::string>(),
|
||||
options["confidence"].as<size_t>(),
|
||||
options["query_id"].as<std::string>(),
|
||||
options["query"].as<std::string>(),
|
||||
options.count("continue_on_errors") > 0,
|
||||
print_stacktrace,
|
||||
settings);
|
||||
|
@ -45,6 +45,7 @@
|
||||
#include <Core/Types.h>
|
||||
#include <Core/QueryProcessingStage.h>
|
||||
#include <Core/ExternalTable.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromFileDescriptor.h>
|
||||
#include <IO/WriteBufferFromFileDescriptor.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
@ -475,9 +476,16 @@ private:
|
||||
/// The value of the option is used as the text of query (or of multiple queries).
|
||||
/// If stdin is not a terminal, INSERT data for the first query is read from it.
|
||||
/// - stdin is not a terminal. In this case queries are read from it.
|
||||
if (!stdin_is_a_tty || config().has("query"))
|
||||
/// - -qf (--queries-file) command line option is present.
|
||||
/// The value of the option is used as file with query (or of multiple queries) to execute.
|
||||
if (!stdin_is_a_tty || config().has("query") || config().has("queries-file"))
|
||||
is_interactive = false;
|
||||
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
{
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
std::cout << std::fixed << std::setprecision(3);
|
||||
std::cerr << std::fixed << std::setprecision(3);
|
||||
|
||||
@ -786,8 +794,15 @@ private:
|
||||
{
|
||||
String text;
|
||||
|
||||
if (config().has("query"))
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
if (config().has("queries-file"))
|
||||
{
|
||||
ReadBufferFromFile in(config().getString("queries-file"));
|
||||
readStringUntilEOF(text, in);
|
||||
processMultiQuery(text);
|
||||
return;
|
||||
}
|
||||
else if (config().has("query"))
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
else
|
||||
{
|
||||
/// If 'query' parameter is not set, read a query from stdin.
|
||||
@ -2320,6 +2335,7 @@ public:
|
||||
"Suggestion limit for how many databases, tables and columns to fetch.")
|
||||
("multiline,m", "multiline")
|
||||
("multiquery,n", "multiquery")
|
||||
("queries-file,qf", po::value<std::string>(), "file path with queries to execute")
|
||||
("format,f", po::value<std::string>(), "default output format")
|
||||
("testmode,T", "enable test hints in comments")
|
||||
("ignore-error", "do not stop processing in multiquery mode")
|
||||
@ -2448,6 +2464,8 @@ public:
|
||||
config().setString("query_id", options["query_id"].as<std::string>());
|
||||
if (options.count("query"))
|
||||
config().setString("query", options["query"].as<std::string>());
|
||||
if (options.count("queries-file"))
|
||||
config().setString("queries-file", options["queries-file"].as<std::string>());
|
||||
if (options.count("database"))
|
||||
config().setString("database", options["database"].as<std::string>());
|
||||
if (options.count("pager"))
|
||||
|
@ -20,9 +20,11 @@
|
||||
#include <Common/ThreadStatus.h>
|
||||
#include <Common/config_version.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/WriteBufferFromFileDescriptor.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/IAST.h>
|
||||
#include <common/ErrorHandlers.h>
|
||||
@ -195,7 +197,7 @@ try
|
||||
ThreadStatus thread_status;
|
||||
UseSSL use_ssl;
|
||||
|
||||
if (!config().has("query") && !config().has("table-structure")) /// Nothing to process
|
||||
if (!config().has("query") && !config().has("table-structure") && !config().has("queries-file")) /// Nothing to process
|
||||
{
|
||||
if (config().hasOption("verbose"))
|
||||
std::cerr << "There are no queries to process." << '\n';
|
||||
@ -203,6 +205,11 @@ try
|
||||
return Application::EXIT_OK;
|
||||
}
|
||||
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
{
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
shared_context = Context::createShared();
|
||||
global_context = std::make_unique<Context>(Context::createGlobal(shared_context.get()));
|
||||
global_context->makeGlobalContext();
|
||||
@ -340,7 +347,17 @@ std::string LocalServer::getInitialCreateTableQuery()
|
||||
void LocalServer::processQueries()
|
||||
{
|
||||
String initial_create_query = getInitialCreateTableQuery();
|
||||
String queries_str = initial_create_query + config().getRawString("query");
|
||||
String queries_str = initial_create_query;
|
||||
|
||||
if (config().has("query"))
|
||||
queries_str += config().getRawString("query");
|
||||
else
|
||||
{
|
||||
String queries_from_file;
|
||||
ReadBufferFromFile in(config().getString("queries-file"));
|
||||
readStringUntilEOF(queries_from_file, in);
|
||||
queries_str += queries_from_file;
|
||||
}
|
||||
|
||||
const auto & settings = global_context->getSettingsRef();
|
||||
|
||||
@ -505,6 +522,7 @@ void LocalServer::init(int argc, char ** argv)
|
||||
("help", "produce help message")
|
||||
("config-file,c", po::value<std::string>(), "config-file path")
|
||||
("query,q", po::value<std::string>(), "query")
|
||||
("queries-file, qf", po::value<std::string>(), "file path with queries to execute")
|
||||
("database,d", po::value<std::string>(), "database")
|
||||
|
||||
("table,N", po::value<std::string>(), "name of the initial table")
|
||||
@ -552,6 +570,8 @@ void LocalServer::init(int argc, char ** argv)
|
||||
config().setString("config-file", options["config-file"].as<std::string>());
|
||||
if (options.count("query"))
|
||||
config().setString("query", options["query"].as<std::string>());
|
||||
if (options.count("queries-file"))
|
||||
config().setString("queries-file", options["queries-file"].as<std::string>());
|
||||
if (options.count("database"))
|
||||
config().setString("default_database", options["database"].as<std::string>());
|
||||
|
||||
|
@ -57,6 +57,7 @@
|
||||
#include <Disks/registerDisks.h>
|
||||
#include <Common/Config/ConfigReloader.h>
|
||||
#include <Server/HTTPHandlerFactory.h>
|
||||
#include <Server/TestKeeperTCPHandlerFactory.h>
|
||||
#include "MetricsTransmitter.h"
|
||||
#include <Common/StatusFile.h>
|
||||
#include <Server/TCPHandlerFactory.h>
|
||||
@ -186,6 +187,85 @@ static std::string getUserName(uid_t user_id)
|
||||
return toString(user_id);
|
||||
}
|
||||
|
||||
Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
|
||||
{
|
||||
Poco::Net::SocketAddress socket_address;
|
||||
try
|
||||
{
|
||||
socket_address = Poco::Net::SocketAddress(host, port);
|
||||
}
|
||||
catch (const Poco::Net::DNSException & e)
|
||||
{
|
||||
const auto code = e.code();
|
||||
if (code == EAI_FAMILY
|
||||
#if defined(EAI_ADDRFAMILY)
|
||||
|| code == EAI_ADDRFAMILY
|
||||
#endif
|
||||
)
|
||||
{
|
||||
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
|
||||
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
|
||||
"specify IPv4 address to listen in <listen_host> element of configuration "
|
||||
"file. Example: <listen_host>0.0.0.0</listen_host>",
|
||||
host, e.code(), e.message());
|
||||
}
|
||||
|
||||
throw;
|
||||
}
|
||||
return socket_address;
|
||||
}
|
||||
|
||||
Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
|
||||
{
|
||||
auto address = makeSocketAddress(host, port, &logger());
|
||||
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
|
||||
if (secure)
|
||||
/// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
|
||||
/// https://github.com/pocoproject/poco/pull/2257
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
else
|
||||
#endif
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
|
||||
#endif
|
||||
|
||||
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));
|
||||
|
||||
return address;
|
||||
}
|
||||
|
||||
void Server::createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const
|
||||
{
|
||||
/// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
|
||||
if (!config().has(port_name))
|
||||
return;
|
||||
|
||||
auto port = config().getInt(port_name);
|
||||
try
|
||||
{
|
||||
func(port);
|
||||
}
|
||||
catch (const Poco::Exception &)
|
||||
{
|
||||
std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);
|
||||
|
||||
if (listen_try)
|
||||
{
|
||||
LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
|
||||
"specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
|
||||
"file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
|
||||
" Example for disabled IPv4: <listen_host>::</listen_host>",
|
||||
message);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception{message, ErrorCodes::NETWORK_ERROR};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Server::uninitialize()
|
||||
{
|
||||
logger().information("shutting down");
|
||||
@ -399,27 +479,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
StatusFile status{path + "status", StatusFile::write_full_info};
|
||||
|
||||
SCOPE_EXIT({
|
||||
/** Ask to cancel background jobs all table engines,
|
||||
* and also query_log.
|
||||
* It is important to do early, not in destructor of Context, because
|
||||
* table engines could use Context on destroy.
|
||||
*/
|
||||
LOG_INFO(log, "Shutting down storages.");
|
||||
|
||||
global_context->shutdown();
|
||||
|
||||
LOG_DEBUG(log, "Shut down storages.");
|
||||
|
||||
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
|
||||
* At this moment, no one could own shared part of Context.
|
||||
*/
|
||||
global_context_ptr = nullptr;
|
||||
global_context.reset();
|
||||
shared_context.reset();
|
||||
LOG_DEBUG(log, "Destroyed global context.");
|
||||
});
|
||||
|
||||
/// Try to increase limit on number of open files.
|
||||
{
|
||||
rlimit rlim;
|
||||
@ -675,6 +734,71 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
total_memory_tracker.setDescription("(total)");
|
||||
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
||||
|
||||
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
||||
|
||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||
http_params->setTimeout(settings.http_receive_timeout);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
bool listen_try = config().getBool("listen_try", false);
|
||||
if (listen_hosts.empty())
|
||||
{
|
||||
listen_hosts.emplace_back("::1");
|
||||
listen_hosts.emplace_back("127.0.0.1");
|
||||
listen_try = true;
|
||||
}
|
||||
|
||||
for (const auto & listen_host : listen_hosts)
|
||||
{
|
||||
/// TCP TestKeeper
|
||||
createServer(listen_host, "test_keeper_server.tcp_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers_to_start_before_tables.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
new TestKeeperTCPHandlerFactory(*this),
|
||||
server_pool,
|
||||
socket,
|
||||
new Poco::Net::TCPServerParams));
|
||||
|
||||
LOG_INFO(log, "Listening for connections to fake zookeeper (tcp): {}", address.toString());
|
||||
});
|
||||
}
|
||||
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
server.start();
|
||||
|
||||
SCOPE_EXIT({
|
||||
/** Ask to cancel background jobs all table engines,
|
||||
* and also query_log.
|
||||
* It is important to do early, not in destructor of Context, because
|
||||
* table engines could use Context on destroy.
|
||||
*/
|
||||
LOG_INFO(log, "Shutting down storages.");
|
||||
|
||||
global_context->shutdown();
|
||||
|
||||
LOG_DEBUG(log, "Shut down storages.");
|
||||
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
server.stop();
|
||||
|
||||
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
|
||||
* At this moment, no one could own shared part of Context.
|
||||
*/
|
||||
global_context_ptr = nullptr;
|
||||
global_context.reset();
|
||||
shared_context.reset();
|
||||
LOG_DEBUG(log, "Destroyed global context.");
|
||||
});
|
||||
|
||||
/// Set current database name before loading tables and databases because
|
||||
/// system logs may copy global context.
|
||||
global_context->setCurrentDatabaseNameInGlobalContext(default_database);
|
||||
@ -804,75 +928,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");
|
||||
#endif
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
{
|
||||
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
||||
|
||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||
http_params->setTimeout(settings.http_receive_timeout);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
bool listen_try = config().getBool("listen_try", false);
|
||||
if (listen_hosts.empty())
|
||||
{
|
||||
listen_hosts.emplace_back("::1");
|
||||
listen_hosts.emplace_back("127.0.0.1");
|
||||
listen_try = true;
|
||||
}
|
||||
|
||||
auto make_socket_address = [&](const std::string & host, UInt16 port)
|
||||
{
|
||||
Poco::Net::SocketAddress socket_address;
|
||||
try
|
||||
{
|
||||
socket_address = Poco::Net::SocketAddress(host, port);
|
||||
}
|
||||
catch (const Poco::Net::DNSException & e)
|
||||
{
|
||||
const auto code = e.code();
|
||||
if (code == EAI_FAMILY
|
||||
#if defined(EAI_ADDRFAMILY)
|
||||
|| code == EAI_ADDRFAMILY
|
||||
#endif
|
||||
)
|
||||
{
|
||||
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
|
||||
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
|
||||
"specify IPv4 address to listen in <listen_host> element of configuration "
|
||||
"file. Example: <listen_host>0.0.0.0</listen_host>",
|
||||
host, e.code(), e.message());
|
||||
}
|
||||
|
||||
throw;
|
||||
}
|
||||
return socket_address;
|
||||
};
|
||||
|
||||
auto socket_bind_listen = [&](auto & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false)
|
||||
{
|
||||
auto address = make_socket_address(host, port);
|
||||
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
|
||||
if (secure)
|
||||
/// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
|
||||
/// https://github.com/pocoproject/poco/pull/2257
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
else
|
||||
#endif
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
|
||||
#endif
|
||||
|
||||
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));
|
||||
|
||||
return address;
|
||||
};
|
||||
|
||||
/// This object will periodically calculate some metrics.
|
||||
AsynchronousMetrics async_metrics(*global_context,
|
||||
config().getUInt("asynchronous_metrics_update_period_s", 60));
|
||||
@ -880,41 +937,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
for (const auto & listen_host : listen_hosts)
{
    auto create_server = [&](const char * port_name, auto && func)
    {
        /// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
        if (!config().has(port_name))
            return;

        auto port = config().getInt(port_name);
        try
        {
            func(port);
        }
        catch (const Poco::Exception &)
        {
            std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);

            if (listen_try)
            {
                LOG_WARNING(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
                    "specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
                    "file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
                    " Example for disabled IPv4: <listen_host>::</listen_host>",
                    message);
            }
            else
            {
                throw Exception{message, ErrorCodes::NETWORK_ERROR};
            }
        }
    };

    /// HTTP
    create_server("http_port", [&](UInt16 port)
    createServer(listen_host, "http_port", listen_try, [&](UInt16 port)
    {
        Poco::Net::ServerSocket socket;
        auto address = socket_bind_listen(socket, listen_host, port);
        auto address = socketBindListen(socket, listen_host, port);
        socket.setReceiveTimeout(settings.http_receive_timeout);
        socket.setSendTimeout(settings.http_send_timeout);
|
||||
|
||||
@ -925,11 +952,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
});
|
||||
|
||||
/// HTTPS
|
||||
create_server("https_port", [&](UInt16 port)
|
||||
createServer(listen_host, "https_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
|
||||
@ -944,14 +971,14 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
});
|
||||
|
||||
/// TCP
|
||||
create_server("tcp_port", [&](UInt16 port)
|
||||
createServer(listen_host, "tcp_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port);
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this),
|
||||
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false),
|
||||
server_pool,
|
||||
socket,
|
||||
new Poco::Net::TCPServerParams));
|
||||
@ -959,16 +986,32 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "Listening for connections with native protocol (tcp): {}", address.toString());
|
||||
});
|
||||
|
||||
/// TCP with SSL
|
||||
create_server("tcp_port_secure", [&](UInt16 port)
|
||||
/// TCP with PROXY protocol, see https://github.com/wolfeidau/proxyv2/blob/master/docs/proxy-protocol.txt
|
||||
createServer(listen_host, "tcp_with_proxy_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this, /* secure= */ true),
|
||||
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true),
|
||||
server_pool,
|
||||
socket,
|
||||
new Poco::Net::TCPServerParams));
|
||||
|
||||
LOG_INFO(log, "Listening for connections with native protocol (tcp) with PROXY: {}", address.toString());
|
||||
});
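For illustration (this snippet is not part of the commit): the PROXY protocol referenced above is a single text header that the proxy sends before any native-protocol bytes, so the server can learn the real client address. A hedged sketch of a PROXY v1 header on the wire; the addresses and the helper names are made-up examples, only the 9010 port mirrors the sample config added below.

#include <string>
#include <unistd.h>

/// "PROXY TCP4 <client ip> <proxy ip> <client port> <server port>\r\n"
std::string makeProxyV1Header()
{
    return "PROXY TCP4 192.0.2.10 203.0.113.5 51234 9010\r\n";   /// example values only
}

/// A proxy would write this line first on the freshly opened connection,
/// then forward the client's native-protocol bytes unchanged.
void sendProxyHeader(int connected_fd)
{
    const std::string header = makeProxyV1Header();
    ::write(connected_fd, header.data(), header.size());
}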
|
||||
|
||||
/// TCP with SSL
|
||||
createServer(listen_host, "tcp_port_secure", listen_try, [&](UInt16 port)
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false),
|
||||
server_pool,
|
||||
socket,
|
||||
new Poco::Net::TCPServerParams));
|
||||
@ -981,10 +1024,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
});
|
||||
|
||||
/// Interserver IO HTTP
|
||||
create_server("interserver_http_port", [&](UInt16 port)
|
||||
createServer(listen_host, "interserver_http_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port);
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
|
||||
@ -993,11 +1036,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "Listening for replica communication (interserver): http://{}", address.toString());
|
||||
});
|
||||
|
||||
create_server("interserver_https_port", [&](UInt16 port)
|
||||
createServer(listen_host, "interserver_https_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
#if USE_SSL
|
||||
Poco::Net::SecureServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
|
||||
@ -1011,10 +1054,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
#endif
|
||||
});
|
||||
|
||||
create_server("mysql_port", [&](UInt16 port)
|
||||
createServer(listen_host, "mysql_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
@ -1026,10 +1069,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "Listening for MySQL compatibility protocol: {}", address.toString());
|
||||
});
|
||||
|
||||
create_server("postgresql_port", [&](UInt16 port)
|
||||
createServer(listen_host, "postgresql_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
|
||||
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
|
||||
socket.setReceiveTimeout(Poco::Timespan());
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
@ -1042,19 +1085,19 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
});
|
||||
|
||||
#if USE_GRPC
|
||||
create_server("grpc_port", [&](UInt16 port)
|
||||
createServer(listen_host, "grpc_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::SocketAddress server_address(listen_host, port);
|
||||
servers.emplace_back(std::make_unique<GRPCServer>(*this, make_socket_address(listen_host, port)));
|
||||
servers.emplace_back(std::make_unique<GRPCServer>(*this, makeSocketAddress(listen_host, port, log)));
|
||||
LOG_INFO(log, "Listening for gRPC protocol: " + server_address.toString());
|
||||
});
|
||||
#endif
|
||||
|
||||
/// Prometheus (if defined and not setup yet with http_port)
|
||||
create_server("prometheus.port", [&](UInt16 port)
|
||||
createServer(listen_host, "prometheus.port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socket_bind_listen(socket, listen_host, port);
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.http_receive_timeout);
|
||||
socket.setSendTimeout(settings.http_send_timeout);
|
||||
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
|
||||
@ -1078,6 +1121,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
|
||||
setTextLog(global_context->getTextLog(), level);
|
||||
}
|
||||
|
||||
buildLoggers(config(), logger());
|
||||
|
||||
main_config_reloader->start();
|
||||
@ -1124,7 +1168,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
current_connections = 0;
|
||||
for (auto & server : servers)
|
||||
{
|
||||
server.stop();
|
||||
current_connections += server.currentConnections();
|
||||
}
|
||||
if (!current_connections)
|
||||
break;
|
||||
sleep_current_ms += sleep_one_ms;
|
||||
|
@ -14,6 +14,13 @@
|
||||
* 3. Interserver HTTP - for replication.
|
||||
*/
|
||||
|
||||
namespace Poco
|
||||
{
|
||||
namespace Net
|
||||
{
|
||||
class ServerSocket;
|
||||
}
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -57,6 +64,13 @@ protected:
|
||||
|
||||
private:
|
||||
Context * global_context_ptr = nullptr;
|
||||
|
||||
private:
|
||||
|
||||
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
|
||||
|
||||
using CreateServerFunc = std::function<void(UInt16)>;
|
||||
void createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const;
|
||||
};
|
||||
|
||||
}
|
||||
|
1
programs/server/config.d/tcp_with_proxy.xml
Symbolic link
@ -0,0 +1 @@
|
||||
../../../tests/config/config.d/tcp_with_proxy.xml
|
@ -64,11 +64,18 @@
|
||||
<http_port>8123</http_port>
|
||||
<tcp_port>9000</tcp_port>
|
||||
<mysql_port>9004</mysql_port>
|
||||
|
||||
<!-- For HTTPS and SSL over native protocol. -->
|
||||
<!--
|
||||
<https_port>8443</https_port>
|
||||
<tcp_port_secure>9440</tcp_port_secure>
|
||||
-->
|
||||
|
||||
<!-- TCP with PROXY protocol (PROXY header sent for every connection) -->
|
||||
<!--
|
||||
<tcp_with_proxy_port>9010</tcp_with_proxy_port>
|
||||
-->
|
||||
|
||||
<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
|
||||
<openSSL>
|
||||
<server> <!-- Used for https server AND secure tcp port -->
|
||||
@ -137,29 +144,32 @@
|
||||
<!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
|
||||
<!-- <grpc_port>9100</grpc_port> -->
|
||||
<grpc>
|
||||
<enable_ssl>true</enable_ssl>
|
||||
<enable_ssl>false</enable_ssl>
|
||||
|
||||
<!-- The following two files are used only if enable_ssl=1
|
||||
<!-- The following two files are used only if enable_ssl=1 -->
|
||||
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
|
||||
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file> -->
|
||||
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>
|
||||
|
||||
<!-- Whether server will request client for a certificate
|
||||
<ssl_require_client_auth>true</ssl_require_client_auth> -->
|
||||
<!-- Whether server will request client for a certificate -->
|
||||
<ssl_require_client_auth>false</ssl_require_client_auth>
|
||||
|
||||
<!-- The following file is used only if ssl_require_client_auth=1
|
||||
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file> -->
|
||||
<!-- The following file is used only if ssl_require_client_auth=1 -->
|
||||
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>
|
||||
|
||||
<!-- Default compression algorithm (applied if client doesn't specify another algorithm).
|
||||
Supported algorithms: none, deflate, gzip, stream_gzip
|
||||
<compression>gzip</compression> -->
|
||||
Supported algorithms: none, deflate, gzip, stream_gzip -->
|
||||
<compression>deflate</compression>
|
||||
|
||||
<!-- Default compression level (applied if client doesn't specify another level).
|
||||
Supported levels: none, low, medium, high
|
||||
<compression_level>high</compression_level> -->
|
||||
Supported levels: none, low, medium, high -->
|
||||
<compression_level>medium</compression_level>
|
||||
|
||||
<!-- Send/receive message size limits in bytes. -1 means unlimited
|
||||
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
|
||||
<max_send_message_size>-1</max_send_message_size>
|
||||
<max_receive_message_size>4194304</max_receive_message_size> -->
|
||||
<max_receive_message_size>-1</max_receive_message_size>
|
||||
|
||||
<!-- Enable if you want very detailed logs -->
|
||||
<verbose_logs>false</verbose_logs>
|
||||
</grpc>
|
||||
|
||||
<!-- Maximum number of concurrent queries. -->
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include <common/find_symbols.h>
|
||||
#include <Poco/ExpireCache.h>
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <filesystem>
|
||||
#include <mutex>
|
||||
|
||||
@ -419,6 +421,18 @@ std::shared_ptr<const ContextAccess> AccessControlManager::getContextAccess(
|
||||
params.http_method = client_info.http_method;
|
||||
params.address = client_info.current_address.host();
|
||||
params.quota_key = client_info.quota_key;
|
||||
|
||||
/// Extract the last entry from comma separated list of X-Forwarded-For addresses.
|
||||
/// Only the last proxy can be trusted (if any).
|
||||
Strings forwarded_addresses;
|
||||
boost::split(forwarded_addresses, client_info.forwarded_for, boost::is_any_of(","));
|
||||
if (!forwarded_addresses.empty())
|
||||
{
|
||||
String & last_forwarded_address = forwarded_addresses.back();
|
||||
boost::trim(last_forwarded_address);
|
||||
params.forwarded_address = last_forwarded_address;
|
||||
}
|
||||
|
||||
return getContextAccess(params);
|
||||
}
|
||||
|
||||
@ -444,9 +458,14 @@ std::shared_ptr<const EnabledRowPolicies> AccessControlManager::getEnabledRowPol
|
||||
|
||||
|
||||
std::shared_ptr<const EnabledQuota> AccessControlManager::getEnabledQuota(
|
||||
const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & address, const String & custom_quota_key) const
|
||||
const UUID & user_id,
|
||||
const String & user_name,
|
||||
const boost::container::flat_set<UUID> & enabled_roles,
|
||||
const Poco::Net::IPAddress & address,
|
||||
const String & forwarded_address,
|
||||
const String & custom_quota_key) const
|
||||
{
|
||||
return quota_cache->getEnabledQuota(user_id, user_name, enabled_roles, address, custom_quota_key);
|
||||
return quota_cache->getEnabledQuota(user_id, user_name, enabled_roles, address, forwarded_address, custom_quota_key);
|
||||
}
|
||||
|
||||
|
||||
|
@ -135,6 +135,7 @@ public:
|
||||
const String & user_name,
|
||||
const boost::container::flat_set<UUID> & enabled_roles,
|
||||
const Poco::Net::IPAddress & address,
|
||||
const String & forwarded_address,
|
||||
const String & custom_quota_key) const;
|
||||
|
||||
std::vector<QuotaUsage> getAllQuotasUsage() const;
|
||||
|
@ -127,6 +127,7 @@ enum class AccessType
|
||||
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \
|
||||
M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
|
||||
M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \
|
||||
M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \
|
||||
M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\
|
||||
M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \
|
||||
|
@ -258,9 +258,12 @@ void ContextAccess::setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> &
|
||||
{
|
||||
assert(roles_info_);
|
||||
roles_info = roles_info_;
|
||||
enabled_row_policies = manager->getEnabledRowPolicies(*params.user_id, roles_info->enabled_roles);
|
||||
enabled_quota = manager->getEnabledQuota(*params.user_id, user_name, roles_info->enabled_roles, params.address, params.quota_key);
|
||||
enabled_settings = manager->getEnabledSettings(*params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);
|
||||
enabled_row_policies = manager->getEnabledRowPolicies(
|
||||
*params.user_id, roles_info->enabled_roles);
|
||||
enabled_quota = manager->getEnabledQuota(
|
||||
*params.user_id, user_name, roles_info->enabled_roles, params.address, params.forwarded_address, params.quota_key);
|
||||
enabled_settings = manager->getEnabledSettings(
|
||||
*params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);
|
||||
calculateAccessRights();
|
||||
}
|
||||
|
||||
|
@ -41,9 +41,16 @@ struct ContextAccessParams
|
||||
ClientInfo::Interface interface = ClientInfo::Interface::TCP;
|
||||
ClientInfo::HTTPMethod http_method = ClientInfo::HTTPMethod::UNKNOWN;
|
||||
Poco::Net::IPAddress address;
|
||||
String forwarded_address;
|
||||
String quota_key;
|
||||
|
||||
auto toTuple() const { return std::tie(user_id, current_roles, use_default_roles, readonly, allow_ddl, allow_introspection, current_database, interface, http_method, address, quota_key); }
|
||||
auto toTuple() const
|
||||
{
|
||||
return std::tie(
|
||||
user_id, current_roles, use_default_roles, readonly, allow_ddl, allow_introspection,
|
||||
current_database, interface, http_method, address, forwarded_address, quota_key);
|
||||
}
|
||||
|
||||
friend bool operator ==(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return lhs.toTuple() == rhs.toTuple(); }
|
||||
friend bool operator !=(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return !(lhs == rhs); }
|
||||
friend bool operator <(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return lhs.toTuple() < rhs.toTuple(); }
|
||||
|
@ -25,9 +25,10 @@ public:
|
||||
String user_name;
|
||||
boost::container::flat_set<UUID> enabled_roles;
|
||||
Poco::Net::IPAddress client_address;
|
||||
String forwarded_address;
|
||||
String client_key;
|
||||
|
||||
auto toTuple() const { return std::tie(user_id, enabled_roles, user_name, client_address, client_key); }
|
||||
auto toTuple() const { return std::tie(user_id, enabled_roles, user_name, client_address, forwarded_address, client_key); }
|
||||
friend bool operator ==(const Params & lhs, const Params & rhs) { return lhs.toTuple() == rhs.toTuple(); }
|
||||
friend bool operator !=(const Params & lhs, const Params & rhs) { return !(lhs == rhs); }
|
||||
friend bool operator <(const Params & lhs, const Params & rhs) { return lhs.toTuple() < rhs.toTuple(); }
|
||||
|
@ -76,6 +76,7 @@ struct Quota : public IAccessEntity
|
||||
NONE, /// All users share the same quota.
|
||||
USER_NAME, /// Connections with the same user name share the same quota.
|
||||
IP_ADDRESS, /// Connections from the same IP share the same quota.
|
||||
FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address.
|
||||
CLIENT_KEY, /// Client should explicitly supply a key to use.
|
||||
CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key.
|
||||
CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key.
|
||||
@ -205,12 +206,16 @@ inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type)
|
||||
if (tokens.size() > 1)
|
||||
{
|
||||
for (const auto & token : tokens)
|
||||
{
|
||||
for (auto kt : ext::range(KeyType::MAX))
|
||||
{
|
||||
if (KeyTypeInfo::get(kt).name == token)
|
||||
{
|
||||
init_base_types.push_back(kt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return KeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)};
|
||||
};
|
||||
@ -232,6 +237,11 @@ inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type)
|
||||
static const auto info = make_info("IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case KeyType::FORWARDED_IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("FORWARDED_IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case KeyType::CLIENT_KEY:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY");
|
||||
|
@ -48,11 +48,21 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
|
||||
switch (quota->key_type)
|
||||
{
|
||||
case KeyType::NONE:
|
||||
{
|
||||
return "";
|
||||
}
|
||||
case KeyType::USER_NAME:
|
||||
{
|
||||
return params.user_name;
|
||||
}
|
||||
case KeyType::IP_ADDRESS:
|
||||
{
|
||||
return params.client_address.toString();
|
||||
}
|
||||
case KeyType::FORWARDED_IP_ADDRESS:
|
||||
{
|
||||
return params.forwarded_address;
|
||||
}
|
||||
case KeyType::CLIENT_KEY:
|
||||
{
|
||||
if (!params.client_key.empty())
|
||||
@ -170,7 +180,7 @@ QuotaCache::QuotaCache(const AccessControlManager & access_control_manager_)
|
||||
QuotaCache::~QuotaCache() = default;
|
||||
|
||||
|
||||
std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & client_address, const String & client_key)
|
||||
std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & client_address, const String & forwarded_address, const String & client_key)
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
ensureAllQuotasRead();
|
||||
@ -180,6 +190,7 @@ std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & use
|
||||
params.user_name = user_name;
|
||||
params.enabled_roles = enabled_roles;
|
||||
params.client_address = client_address;
|
||||
params.forwarded_address = forwarded_address;
|
||||
params.client_key = client_key;
|
||||
auto it = enabled_quotas.find(params);
|
||||
if (it != enabled_quotas.end())
|
||||
|
@ -20,7 +20,14 @@ public:
|
||||
QuotaCache(const AccessControlManager & access_control_manager_);
|
||||
~QuotaCache();
|
||||
|
||||
std::shared_ptr<const EnabledQuota> getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & address, const String & client_key);
|
||||
std::shared_ptr<const EnabledQuota> getEnabledQuota(
|
||||
const UUID & user_id,
|
||||
const String & user_name,
|
||||
const boost::container::flat_set<UUID> & enabled_roles,
|
||||
const Poco::Net::IPAddress & address,
|
||||
const String & forwarded_address,
|
||||
const String & client_key);
|
||||
|
||||
std::vector<QuotaUsage> getAllQuotasUsage() const;
|
||||
|
||||
private:
|
||||
|
@ -215,6 +215,8 @@ namespace
|
||||
String quota_config = "quotas." + quota_name;
|
||||
if (config.has(quota_config + ".keyed_by_ip"))
|
||||
quota->key_type = KeyType::IP_ADDRESS;
|
||||
else if (config.has(quota_config + ".keyed_by_forwarded_ip"))
|
||||
quota->key_type = KeyType::FORWARDED_IP_ADDRESS;
|
||||
else if (config.has(quota_config + ".keyed"))
|
||||
quota->key_type = KeyType::CLIENT_KEY_OR_USER_NAME;
|
||||
else
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <AggregateFunctions/FactoryHelpers.h>
|
||||
#include <DataTypes/DataTypeDate.h>
|
||||
#include <DataTypes/DataTypeDateTime.h>
|
||||
#include <DataTypes/DataTypeDateTime64.h>
|
||||
#include "registerAggregateFunctions.h"
|
||||
|
||||
|
||||
|
@ -100,6 +100,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(const std::string & name, c
|
||||
if (which.idx == TypeIndex::Decimal32) return std::make_shared<Function<Decimal32, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Decimal64) return std::make_shared<Function<Decimal64, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::Decimal128) return std::make_shared<Function<Decimal128, false>>(argument_types, params);
|
||||
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);
|
||||
//if (which.idx == TypeIndex::Decimal256) return std::make_shared<Function<Decimal256, false>>(argument_types, params);
|
||||
}
|
||||
|
||||
|
@ -237,6 +237,8 @@ template <typename T, typename TResult, typename Data, AggregateFunctionSumType
|
||||
class AggregateFunctionSum final : public IAggregateFunctionDataHelper<Data, AggregateFunctionSum<T, TResult, Data, Type>>
|
||||
{
|
||||
public:
|
||||
static constexpr bool DateTime64Supported = false;
|
||||
|
||||
using ResultDataType = std::conditional_t<IsDecimalNumber<T>, DataTypeDecimal<TResult>, DataTypeNumber<TResult>>;
|
||||
using ColVecType = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;
|
||||
using ColVecResult = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<TResult>, ColumnVector<TResult>>;
|
||||
|
@ -135,6 +135,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
|
||||
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64>(std::forward<TArgs>(args)...);
|
||||
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128>(std::forward<TArgs>(args)...);
|
||||
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256>(std::forward<TArgs>(args)...);
|
||||
if constexpr (AggregateFunctionTemplate<DateTime64>::DateTime64Supported)
|
||||
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64>(std::forward<TArgs>(args)...);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -146,6 +148,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
|
||||
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64, Data>(std::forward<TArgs>(args)...);
|
||||
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128, Data>(std::forward<TArgs>(args)...);
|
||||
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256, Data>(std::forward<TArgs>(args)...);
|
||||
if constexpr (AggregateFunctionTemplate<DateTime64, Data>::DateTime64Supported)
|
||||
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64, Data>(std::forward<TArgs>(args)...);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
@ -310,6 +310,9 @@ protected:
|
||||
static const Data & data(ConstAggregateDataPtr place) { return *reinterpret_cast<const Data*>(place); }
|
||||
|
||||
public:
|
||||
// Derived class can `override` this to flag that DateTime64 is not supported.
|
||||
static constexpr bool DateTime64Supported = true;
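For illustration (this snippet is not part of the commit): the flag above is a compile-time opt-out. The base helper defaults it to true, a derived aggregate such as AggregateFunctionSum redefines it to false, and the factory guards the DateTime64 instantiation with if constexpr. A minimal self-contained sketch of the same technique, with hypothetical type names:

#include <cstdio>

struct BaseHelper
{
    static constexpr bool DateTime64Supported = true;   /// default: the specialisation may be created
};

struct SumLike : BaseHelper
{
    static constexpr bool DateTime64Supported = false;  /// opt out, as AggregateFunctionSum does above
};

template <typename T>
void createForDateTime64()
{
    if constexpr (T::DateTime64Supported)
        std::printf("DateTime64 specialisation created\n");
    else
        std::printf("DateTime64 not supported, skipped at compile time\n");
}

int main()
{
    createForDateTime64<BaseHelper>();  /// created
    createForDateTime64<SumLike>();     /// skipped
}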
|
||||
|
||||
IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_)
|
||||
: IAggregateFunctionHelper<Derived>(argument_types_, parameters_) {}
|
||||
|
||||
|
@ -403,6 +403,9 @@ if (USE_MSGPACK)
|
||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
target_link_libraries (clickhouse_common_io PUBLIC ${FAST_FLOAT_LIBRARY})
|
||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${FAST_FLOAT_INCLUDE_DIR})
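For illustration (this snippet is not part of the commit): fast_float, linked here into clickhouse_common_io, is a header-only float parser. A minimal standalone sketch of its from_chars interface, independent of any ClickHouse wrappers:

#include <fast_float/fast_float.h>
#include <cstdio>
#include <string>
#include <system_error>

int main()
{
    std::string s = "3.1415 rest";
    double value = 0;
    /// Parses as much of [first, last) as forms a number; ec reports failure, ptr is where parsing stopped.
    auto result = fast_float::from_chars(s.data(), s.data() + s.size(), value);
    if (result.ec == std::errc())
        std::printf("parsed %f, stopped at \"%s\"\n", value, result.ptr);
    return 0;
}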
|
||||
|
||||
if (USE_ORC)
|
||||
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
|
||||
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/orc/c++/include)
|
||||
|
@ -370,4 +370,5 @@ template class ColumnDecimal<Decimal32>;
|
||||
template class ColumnDecimal<Decimal64>;
|
||||
template class ColumnDecimal<Decimal128>;
|
||||
template class ColumnDecimal<Decimal256>;
|
||||
template class ColumnDecimal<DateTime64>;
|
||||
}
|
||||
|
@ -70,7 +70,7 @@ LazyPipeFDs::~LazyPipeFDs()
|
||||
}
|
||||
|
||||
|
||||
void LazyPipeFDs::setNonBlocking()
|
||||
void LazyPipeFDs::setNonBlockingWrite()
|
||||
{
|
||||
int flags = fcntl(fds_rw[1], F_GETFL, 0);
|
||||
if (-1 == flags)
|
||||
@ -79,6 +79,21 @@ void LazyPipeFDs::setNonBlocking()
|
||||
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
|
||||
}
|
||||
|
||||
void LazyPipeFDs::setNonBlockingRead()
|
||||
{
|
||||
int flags = fcntl(fds_rw[0], F_GETFL, 0);
|
||||
if (-1 == flags)
|
||||
throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
|
||||
if (-1 == fcntl(fds_rw[0], F_SETFL, flags | O_NONBLOCK))
|
||||
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
|
||||
}
|
||||
|
||||
void LazyPipeFDs::setNonBlockingReadWrite()
|
||||
{
|
||||
setNonBlockingRead();
|
||||
setNonBlockingWrite();
|
||||
}
|
||||
|
||||
void LazyPipeFDs::tryIncreaseSize(int desired_size)
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
|
@ -17,7 +17,12 @@ struct LazyPipeFDs
|
||||
void open();
|
||||
void close();
|
||||
|
||||
void setNonBlocking();
|
||||
/// Set O_NONBLOCK to different ends of pipe preserving existing flags.
|
||||
/// Throws an exception if fcntl was not successful.
|
||||
void setNonBlockingWrite();
|
||||
void setNonBlockingRead();
|
||||
void setNonBlockingReadWrite();
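For illustration (this snippet is not part of the commit): only the write end of the signal/trace pipes is switched to non-blocking (see setNonBlockingWrite() and the TraceCollector change below), so a writer that holds locks gets EAGAIN instead of blocking when the pipe is full. A self-contained POSIX sketch of that behaviour, independent of the LazyPipeFDs wrapper:

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

int main()
{
    int fds[2];
    if (pipe(fds) != 0)
        return 1;

    /// Same idea as setNonBlockingWrite(): add O_NONBLOCK to the write end, preserving existing flags.
    int flags = fcntl(fds[1], F_GETFL, 0);
    fcntl(fds[1], F_SETFL, flags | O_NONBLOCK);

    /// Fill the pipe; once the kernel buffer is full, write() fails with EAGAIN instead of blocking.
    char byte = 0;
    while (write(fds[1], &byte, 1) == 1)
        ;
    if (errno == EAGAIN)
        std::printf("pipe is full, write would block\n");

    close(fds[0]);
    close(fds[1]);
    return 0;
}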
|
||||
|
||||
void tryIncreaseSize(int desired_size);
|
||||
|
||||
~LazyPipeFDs();
|
||||
|
@ -195,7 +195,8 @@ void StackTrace::symbolize(const StackTrace::FramePointers & frame_pointers, siz
|
||||
{
|
||||
#if defined(__ELF__) && !defined(__FreeBSD__) && !defined(ARCADIA_BUILD)
|
||||
|
||||
const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
|
||||
auto symbol_index_ptr = DB::SymbolIndex::instance();
|
||||
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
|
||||
std::unordered_map<std::string, DB::Dwarf> dwarfs;
|
||||
|
||||
for (size_t i = 0; i < offset; ++i)
|
||||
@ -316,7 +317,8 @@ static void toStringEveryLineImpl(
|
||||
return callback("<Empty trace>");
|
||||
|
||||
#if defined(__ELF__) && !defined(__FreeBSD__)
|
||||
const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
|
||||
auto symbol_index_ptr = DB::SymbolIndex::instance();
|
||||
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
|
||||
std::unordered_map<std::string, DB::Dwarf> dwarfs;
|
||||
|
||||
std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
|
||||
|
@ -300,13 +300,13 @@ void collectSymbolsFromELF(dl_phdr_info * info,
|
||||
|
||||
String our_build_id = getBuildIDFromProgramHeaders(info);
|
||||
|
||||
/// If the name is empty - it's main executable.
|
||||
/// Find a elf file for the main executable.
|
||||
|
||||
/// If the name is empty and there is a non-empty build-id - it's main executable.
|
||||
/// Find a elf file for the main executable and set the build-id.
|
||||
if (object_name.empty())
|
||||
{
|
||||
object_name = "/proc/self/exe";
|
||||
build_id = our_build_id;
|
||||
if (build_id.empty())
|
||||
build_id = our_build_id;
|
||||
}
|
||||
|
||||
std::error_code ec;
|
||||
@ -316,9 +316,16 @@ void collectSymbolsFromELF(dl_phdr_info * info,
|
||||
return;
|
||||
|
||||
/// Debug info and symbol table sections may be split to separate binary.
|
||||
std::filesystem::path local_debug_info_path = canonical_path.parent_path() / canonical_path.stem();
|
||||
local_debug_info_path += ".debug";
|
||||
std::filesystem::path debug_info_path = std::filesystem::path("/usr/lib/debug") / canonical_path.relative_path();
|
||||
|
||||
object_name = std::filesystem::exists(debug_info_path) ? debug_info_path : canonical_path;
|
||||
if (std::filesystem::exists(local_debug_info_path))
|
||||
object_name = local_debug_info_path;
|
||||
else if (std::filesystem::exists(debug_info_path))
|
||||
object_name = debug_info_path;
|
||||
else
|
||||
object_name = canonical_path;
|
||||
|
||||
/// But we have to compare Build ID to check that debug info corresponds to the same executable.
|
||||
|
||||
@ -434,10 +441,12 @@ String SymbolIndex::getBuildIDHex() const
|
||||
return build_id_hex;
|
||||
}
|
||||
|
||||
SymbolIndex & SymbolIndex::instance()
|
||||
MultiVersion<SymbolIndex>::Version SymbolIndex::instance(bool reload)
|
||||
{
|
||||
static SymbolIndex instance;
|
||||
return instance;
|
||||
static MultiVersion<SymbolIndex> instance(std::unique_ptr<SymbolIndex>(new SymbolIndex));
|
||||
if (reload)
|
||||
instance.set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
|
||||
return instance.get();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <Common/Elf.h>
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
#include <Common/MultiVersion.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -21,7 +22,7 @@ protected:
|
||||
SymbolIndex() { update(); }
|
||||
|
||||
public:
|
||||
static SymbolIndex & instance();
|
||||
static MultiVersion<SymbolIndex>::Version instance(bool reload = false);
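For illustration (this snippet is not part of the commit): instance() now returns a MultiVersion snapshot instead of a plain reference, so a caller pins the version it is reading while instance(/* reload = */ true) (used together with the new SYSTEM RELOAD SYMBOLS grant above) swaps in a fresh index. A short sketch of the calling pattern that the updated call sites in this commit follow:

#include <Common/SymbolIndex.h>

void useSymbolIndex()
{
    /// Keep the returned Version alive for as long as the reference is used;
    /// a concurrent reload only replaces the shared pointer, not this snapshot.
    auto symbol_index_ptr = DB::SymbolIndex::instance();
    const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
    (void)symbol_index.getBuildIDHex();
}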
|
||||
|
||||
struct Symbol
|
||||
{
|
||||
|
@ -36,7 +36,7 @@ TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_)
|
||||
/** Turn write end of pipe to non-blocking mode to avoid deadlocks
|
||||
* when QueryProfiler is invoked under locks and TraceCollector cannot pull data from pipe.
|
||||
*/
|
||||
pipe.setNonBlocking();
|
||||
pipe.setNonBlockingWrite();
|
||||
pipe.tryIncreaseSize(1 << 20);
|
||||
|
||||
thread = ThreadFromGlobalPool(&TraceCollector::run, this);
|
||||
|
@ -31,7 +31,6 @@ using Undo = std::function<void()>;
|
||||
|
||||
struct TestKeeperRequest : virtual Request
|
||||
{
|
||||
virtual bool isMutable() const { return false; }
|
||||
virtual ResponsePtr createResponse() const = 0;
|
||||
virtual std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const = 0;
|
||||
virtual void processWatches(TestKeeper::Watches & /*watches*/, TestKeeper::Watches & /*list_watches*/) const {}
|
||||
@ -85,7 +84,6 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest
|
||||
{
|
||||
TestKeeperRemoveRequest() = default;
|
||||
explicit TestKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}
|
||||
bool isMutable() const override { return true; }
|
||||
ResponsePtr createResponse() const override;
|
||||
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||
|
||||
@ -112,7 +110,6 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
|
||||
{
|
||||
TestKeeperSetRequest() = default;
|
||||
explicit TestKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}
|
||||
bool isMutable() const override { return true; }
|
||||
ResponsePtr createResponse() const override;
|
||||
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||
|
||||
|
@ -125,8 +125,6 @@ private:
|
||||
Watches watches;
|
||||
Watches list_watches; /// Watches for 'list' request (watches on children).
|
||||
|
||||
void createWatchCallBack(const String & path);
|
||||
|
||||
using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
|
||||
RequestsQueue requests_queue{1};
|
||||
|
||||
|
806
src/Common/ZooKeeper/TestKeeperStorage.cpp
Normal file
@ -0,0 +1,806 @@
|
||||
#include <Common/ZooKeeper/TestKeeperStorage.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/setThreadName.h>
|
||||
#include <mutex>
|
||||
#include <functional>
|
||||
#include <common/logger_useful.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <sstream>
|
||||
#include <iomanip>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int TIMEOUT_EXCEEDED;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace zkutil
|
||||
{
|
||||
|
||||
using namespace DB;
|
||||
|
||||
static String parentPath(const String & path)
|
||||
{
|
||||
auto rslash_pos = path.rfind('/');
|
||||
if (rslash_pos > 0)
|
||||
return path.substr(0, rslash_pos);
|
||||
return "/";
|
||||
}
|
||||
|
||||
static String baseName(const String & path)
|
||||
{
|
||||
auto rslash_pos = path.rfind('/');
|
||||
return path.substr(rslash_pos + 1);
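/// Illustration (not part of this commit): with these helpers,
///     parentPath("/a/b/c") == "/a/b",  baseName("/a/b/c") == "c",
///     parentPath("/a")     == "/",     baseName("/a")     == "a";
/// i.e. the text after the last '/' is the node name and the rest is its parent path.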
|
||||
}
|
||||
|
||||
static void processWatchesImpl(const String & path, TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches, Coordination::Event event_type)
|
||||
{
|
||||
auto it = watches.find(path);
|
||||
if (it != watches.end())
|
||||
{
|
||||
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
|
||||
watch_response->path = path;
|
||||
watch_response->xid = -1;
|
||||
watch_response->zxid = -1;
|
||||
watch_response->type = event_type;
|
||||
watch_response->state = Coordination::State::CONNECTED;
|
||||
for (auto & watcher : it->second)
|
||||
if (watcher.watch_callback)
|
||||
watcher.watch_callback(watch_response);
|
||||
|
||||
watches.erase(it);
|
||||
}
|
||||
|
||||
auto parent_path = parentPath(path);
|
||||
it = list_watches.find(parent_path);
|
||||
if (it != list_watches.end())
|
||||
{
|
||||
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_list_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
|
||||
watch_list_response->path = parent_path;
|
||||
watch_list_response->xid = -1;
|
||||
watch_list_response->zxid = -1;
|
||||
watch_list_response->type = Coordination::Event::CHILD;
|
||||
watch_list_response->state = Coordination::State::CONNECTED;
|
||||
for (auto & watcher : it->second)
|
||||
if (watcher.watch_callback)
|
||||
watcher.watch_callback(watch_list_response);
|
||||
|
||||
list_watches.erase(it);
|
||||
}
|
||||
}
|
||||
|
||||
TestKeeperStorage::TestKeeperStorage()
|
||||
{
|
||||
container.emplace("/", Node());
|
||||
|
||||
processing_thread = ThreadFromGlobalPool([this] { processingThread(); });
|
||||
}
|
||||
|
||||
using Undo = std::function<void()>;
|
||||
|
||||
struct TestKeeperStorageRequest
|
||||
{
|
||||
Coordination::ZooKeeperRequestPtr zk_request;
|
||||
|
||||
explicit TestKeeperStorageRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
|
||||
: zk_request(zk_request_)
|
||||
{}
|
||||
virtual std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const = 0;
|
||||
virtual void processWatches(TestKeeperStorage::Watches & /*watches*/, TestKeeperStorage::Watches & /*list_watches*/) const {}
|
||||
|
||||
virtual ~TestKeeperStorageRequest() = default;
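/// Illustration (not part of this commit): process() returns the response plus an optional
/// rollback closure (Undo). TestKeeperStorageMultiRequest below relies on this contract: it runs
/// its sub-requests in order, collects their Undo closures, and on the first error replays them
/// in reverse, roughly
///     for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
///         if (*it) (*it)();
/// so a partially applied multi-request leaves the container unchanged.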
|
||||
};
|
||||
|
||||
struct TestKeeperStorageHeartbeatRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & /* container */, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
|
||||
{
|
||||
return {zk_request->makeResponse(), {}};
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct TestKeeperStorageCreateRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
|
||||
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
|
||||
{
|
||||
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CREATED);
|
||||
}
|
||||
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Undo undo;
|
||||
Coordination::ZooKeeperCreateResponse & response = dynamic_cast<Coordination::ZooKeeperCreateResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperCreateRequest & request = dynamic_cast<Coordination::ZooKeeperCreateRequest &>(*zk_request);
|
||||
|
||||
if (container.count(request.path))
|
||||
{
|
||||
response.error = Coordination::Error::ZNODEEXISTS;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto it = container.find(parentPath(request.path));
|
||||
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else if (it->second.is_ephemeral)
|
||||
{
|
||||
response.error = Coordination::Error::ZNOCHILDRENFOREPHEMERALS;
|
||||
}
|
||||
else
|
||||
{
|
||||
TestKeeperStorage::Node created_node;
|
||||
created_node.seq_num = 0;
|
||||
created_node.stat.czxid = zxid;
|
||||
created_node.stat.mzxid = zxid;
|
||||
created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
|
||||
created_node.stat.mtime = created_node.stat.ctime;
|
||||
created_node.stat.numChildren = 0;
|
||||
created_node.stat.dataLength = request.data.length();
|
||||
created_node.data = request.data;
|
||||
created_node.is_ephemeral = request.is_ephemeral;
|
||||
created_node.is_sequental = request.is_sequential;
|
||||
std::string path_created = request.path;
|
||||
|
||||
if (request.is_sequential)
|
||||
{
|
||||
auto seq_num = it->second.seq_num;
|
||||
|
||||
std::stringstream seq_num_str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
|
||||
seq_num_str.exceptions(std::ios::failbit);
|
||||
seq_num_str << std::setw(10) << std::setfill('0') << seq_num;
|
||||
|
||||
path_created += seq_num_str.str();
|
||||
}
|
||||
|
||||
/// Increment sequential number even if node is not sequential
|
||||
++it->second.seq_num;
|
||||
|
||||
response.path_created = path_created;
|
||||
container.emplace(path_created, std::move(created_node));
|
||||
|
||||
if (request.is_ephemeral)
|
||||
ephemerals[session_id].emplace(path_created);
|
||||
|
||||
undo = [&container, &ephemerals, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path = it->first]
|
||||
{
|
||||
container.erase(path_created);
|
||||
if (is_ephemeral)
|
||||
ephemerals[session_id].erase(path_created);
|
||||
auto & undo_parent = container.at(parent_path);
|
||||
--undo_parent.stat.cversion;
|
||||
--undo_parent.stat.numChildren;
|
||||
--undo_parent.seq_num;
|
||||
};
|
||||
|
||||
++it->second.stat.cversion;
|
||||
++it->second.stat.numChildren;
|
||||
|
||||
response.error = Coordination::Error::ZOK;
|
||||
}
|
||||
}
|
||||
|
||||
return { response_ptr, undo };
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageGetRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperGetResponse & response = dynamic_cast<Coordination::ZooKeeperGetResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperGetRequest & request = dynamic_cast<Coordination::ZooKeeperGetRequest &>(*zk_request);
|
||||
|
||||
auto it = container.find(request.path);
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else
|
||||
{
|
||||
response.stat = it->second.stat;
|
||||
response.data = it->second.data;
|
||||
response.error = Coordination::Error::ZOK;
|
||||
}
|
||||
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageRemoveRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t /*zxid*/, int64_t session_id) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperRemoveResponse & response = dynamic_cast<Coordination::ZooKeeperRemoveResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperRemoveRequest & request = dynamic_cast<Coordination::ZooKeeperRemoveRequest &>(*zk_request);
|
||||
Undo undo;
|
||||
|
||||
auto it = container.find(request.path);
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else if (request.version != -1 && request.version != it->second.stat.version)
|
||||
{
|
||||
response.error = Coordination::Error::ZBADVERSION;
|
||||
}
|
||||
else if (it->second.stat.numChildren)
|
||||
{
|
||||
response.error = Coordination::Error::ZNOTEMPTY;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto prev_node = it->second;
|
||||
if (prev_node.is_ephemeral)
|
||||
ephemerals[session_id].erase(request.path);
|
||||
|
||||
container.erase(it);
|
||||
auto & parent = container.at(parentPath(request.path));
|
||||
--parent.stat.numChildren;
|
||||
++parent.stat.cversion;
|
||||
response.error = Coordination::Error::ZOK;
|
||||
|
||||
undo = [prev_node, &container, &ephemerals, session_id, path = request.path]
|
||||
{
|
||||
if (prev_node.is_ephemeral)
|
||||
ephemerals[session_id].emplace(path);
|
||||
|
||||
container.emplace(path, prev_node);
|
||||
auto & undo_parent = container.at(parentPath(path));
|
||||
++undo_parent.stat.numChildren;
|
||||
--undo_parent.stat.cversion;
|
||||
};
|
||||
}
|
||||
|
||||
return { response_ptr, undo };
|
||||
}
|
||||
|
||||
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
|
||||
{
|
||||
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::DELETED);
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageExistsRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /* session_id */) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperExistsResponse & response = dynamic_cast<Coordination::ZooKeeperExistsResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperExistsRequest & request = dynamic_cast<Coordination::ZooKeeperExistsRequest &>(*zk_request);
|
||||
|
||||
auto it = container.find(request.path);
|
||||
if (it != container.end())
|
||||
{
|
||||
response.stat = it->second.stat;
|
||||
response.error = Coordination::Error::ZOK;
|
||||
}
|
||||
else
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageSetRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t zxid, int64_t /* session_id */) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperSetResponse & response = dynamic_cast<Coordination::ZooKeeperSetResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperSetRequest & request = dynamic_cast<Coordination::ZooKeeperSetRequest &>(*zk_request);
|
||||
Undo undo;
|
||||
|
||||
auto it = container.find(request.path);
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else if (request.version == -1 || request.version == it->second.stat.version)
|
||||
{
|
||||
auto prev_node = it->second;
|
||||
|
||||
it->second.data = request.data;
|
||||
++it->second.stat.version;
|
||||
it->second.stat.mzxid = zxid;
|
||||
it->second.stat.mtime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
|
||||
it->second.stat.dataLength = request.data.length();
|
||||
it->second.data = request.data;
|
||||
++container.at(parentPath(request.path)).stat.cversion;
|
||||
response.stat = it->second.stat;
|
||||
response.error = Coordination::Error::ZOK;
|
||||
|
||||
undo = [prev_node, &container, path = request.path]
|
||||
{
|
||||
container.at(path) = prev_node;
|
||||
--container.at(parentPath(path)).stat.cversion;
|
||||
};
|
||||
}
|
||||
else
|
||||
{
|
||||
response.error = Coordination::Error::ZBADVERSION;
|
||||
}
|
||||
|
||||
return { response_ptr, undo };
|
||||
}
|
||||
|
||||
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
|
||||
{
|
||||
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CHANGED);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
struct TestKeeperStorageListRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /*session_id*/) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperListResponse & response = dynamic_cast<Coordination::ZooKeeperListResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperListRequest & request = dynamic_cast<Coordination::ZooKeeperListRequest &>(*zk_request);
|
||||
auto it = container.find(request.path);
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto path_prefix = request.path;
|
||||
if (path_prefix.empty())
|
||||
throw DB::Exception("Logical error: path cannot be empty", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
if (path_prefix.back() != '/')
|
||||
path_prefix += '/';
|
||||
|
||||
/// Fairly inefficient.
|
||||
for (auto child_it = container.upper_bound(path_prefix);
|
||||
child_it != container.end() && startsWith(child_it->first, path_prefix);
|
||||
++child_it)
|
||||
{
|
||||
if (parentPath(child_it->first) == request.path)
|
||||
response.names.emplace_back(baseName(child_it->first));
|
||||
}
|
||||
|
||||
response.stat = it->second.stat;
|
||||
response.error = Coordination::Error::ZOK;
|
||||
}
|
||||
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageCheckRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /*session_id*/) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperCheckResponse & response = dynamic_cast<Coordination::ZooKeeperCheckResponse &>(*response_ptr);
|
||||
Coordination::ZooKeeperCheckRequest & request = dynamic_cast<Coordination::ZooKeeperCheckRequest &>(*zk_request);
|
||||
auto it = container.find(request.path);
|
||||
if (it == container.end())
|
||||
{
|
||||
response.error = Coordination::Error::ZNONODE;
|
||||
}
|
||||
else if (request.version != -1 && request.version != it->second.stat.version)
|
||||
{
|
||||
response.error = Coordination::Error::ZBADVERSION;
|
||||
}
|
||||
else
|
||||
{
|
||||
response.error = Coordination::Error::ZOK;
|
||||
}
|
||||
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageMultiRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
std::vector<TestKeeperStorageRequestPtr> concrete_requests;
|
||||
explicit TestKeeperStorageMultiRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
|
||||
: TestKeeperStorageRequest(zk_request_)
|
||||
{
|
||||
Coordination::ZooKeeperMultiRequest & request = dynamic_cast<Coordination::ZooKeeperMultiRequest &>(*zk_request);
|
||||
concrete_requests.reserve(request.requests.size());
|
||||
|
||||
for (const auto & sub_request : request.requests)
|
||||
{
|
||||
auto sub_zk_request = dynamic_pointer_cast<Coordination::ZooKeeperRequest>(sub_request);
|
||||
if (sub_zk_request->getOpNum() == Coordination::OpNum::Create)
|
||||
{
|
||||
concrete_requests.push_back(std::make_shared<TestKeeperStorageCreateRequest>(sub_zk_request));
|
||||
}
|
||||
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Remove)
|
||||
{
|
||||
concrete_requests.push_back(std::make_shared<TestKeeperStorageRemoveRequest>(sub_zk_request));
|
||||
}
|
||||
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Set)
|
||||
{
|
||||
concrete_requests.push_back(std::make_shared<TestKeeperStorageSetRequest>(sub_zk_request));
|
||||
}
|
||||
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Check)
|
||||
{
|
||||
concrete_requests.push_back(std::make_shared<TestKeeperStorageCheckRequest>(sub_zk_request));
|
||||
}
|
||||
else
|
||||
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum());
|
||||
}
|
||||
}
|
||||
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const override
|
||||
{
|
||||
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
|
||||
Coordination::ZooKeeperMultiResponse & response = dynamic_cast<Coordination::ZooKeeperMultiResponse &>(*response_ptr);
|
||||
std::vector<Undo> undo_actions;
|
||||
|
||||
try
|
||||
{
|
||||
size_t i = 0;
|
||||
for (const auto & concrete_request : concrete_requests)
|
||||
{
|
||||
auto [ cur_response, undo_action ] = concrete_request->process(container, ephemerals, zxid, session_id);
|
||||
|
||||
response.responses[i] = cur_response;
|
||||
if (cur_response->error != Coordination::Error::ZOK)
|
||||
{
|
||||
for (size_t j = 0; j <= i; ++j)
|
||||
{
|
||||
auto response_error = response.responses[j]->error;
|
||||
response.responses[j] = std::make_shared<Coordination::ZooKeeperErrorResponse>();
|
||||
response.responses[j]->error = response_error;
|
||||
}
|
||||
|
||||
for (size_t j = i + 1; j < response.responses.size(); ++j)
|
||||
{
|
||||
response.responses[j] = std::make_shared<Coordination::ZooKeeperErrorResponse>();
|
||||
response.responses[j]->error = Coordination::Error::ZRUNTIMEINCONSISTENCY;
|
||||
}
|
||||
|
||||
for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
|
||||
if (*it)
|
||||
(*it)();
|
||||
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
else
|
||||
undo_actions.emplace_back(std::move(undo_action));
|
||||
|
||||
++i;
|
||||
}
|
||||
|
||||
response.error = Coordination::Error::ZOK;
|
||||
return { response_ptr, {} };
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
|
||||
if (*it)
|
||||
(*it)();
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
|
||||
{
|
||||
for (const auto & generic_request : concrete_requests)
|
||||
generic_request->processWatches(watches, list_watches);
|
||||
}
|
||||
};
|
||||
|
||||
struct TestKeeperStorageCloseRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container &, TestKeeperStorage::Ephemerals &, int64_t, int64_t) const override
|
||||
{
|
||||
throw DB::Exception("Called process on close request", ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
};
|
||||
|
||||
void TestKeeperStorage::processingThread()
|
||||
{
|
||||
setThreadName("TestKeeperSProc");
|
||||
|
||||
try
|
||||
{
|
||||
while (!shutdown)
|
||||
{
|
||||
RequestInfo info;
|
||||
|
||||
UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
|
||||
|
||||
if (requests_queue.tryPop(info, max_wait))
|
||||
{
|
||||
if (shutdown)
|
||||
break;
|
||||
|
||||
auto zk_request = info.request->zk_request;
|
||||
if (zk_request->getOpNum() == Coordination::OpNum::Close)
|
||||
{
|
||||
auto it = ephemerals.find(info.session_id);
|
||||
if (it != ephemerals.end())
|
||||
{
|
||||
for (const auto & ephemeral_path : it->second)
|
||||
{
|
||||
container.erase(ephemeral_path);
|
||||
processWatchesImpl(ephemeral_path, watches, list_watches, Coordination::Event::DELETED);
|
||||
}
|
||||
ephemerals.erase(it);
|
||||
}
|
||||
clearDeadWatches(info.session_id);
|
||||
|
||||
/// Finish connection
|
||||
auto response = std::make_shared<Coordination::ZooKeeperCloseResponse>();
|
||||
response->xid = zk_request->xid;
|
||||
response->zxid = getZXID();
|
||||
info.response_callback(response);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto [response, _] = info.request->process(container, ephemerals, zxid, info.session_id);
|
||||
|
||||
if (info.watch_callback)
|
||||
{
|
||||
if (response->error == Coordination::Error::ZOK)
|
||||
{
|
||||
auto & watches_type = zk_request->getOpNum() == Coordination::OpNum::List || zk_request->getOpNum() == Coordination::OpNum::SimpleList
|
||||
? list_watches
|
||||
: watches;
|
||||
|
||||
watches_type[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
|
||||
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
|
||||
}
|
||||
else if (response->error == Coordination::Error::ZNONODE && zk_request->getOpNum() == Coordination::OpNum::Exists)
|
||||
{
|
||||
watches[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
|
||||
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
|
||||
}
|
||||
else
|
||||
{
|
||||
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
|
||||
watch_response->path = zk_request->getPath();
|
||||
watch_response->xid = -1;
|
||||
watch_response->error = response->error;
|
||||
watch_response->type = Coordination::Event::NOTWATCHING;
|
||||
info.watch_callback(watch_response);
|
||||
}
|
||||
}
|
||||
|
||||
if (response->error == Coordination::Error::ZOK)
|
||||
info.request->processWatches(watches, list_watches);
|
||||
|
||||
response->xid = zk_request->xid;
|
||||
response->zxid = getZXID();
|
||||
|
||||
info.response_callback(response);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
finalize();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void TestKeeperStorage::finalize()
|
||||
{
|
||||
{
|
||||
std::lock_guard lock(push_request_mutex);
|
||||
|
||||
if (shutdown)
|
||||
return;
|
||||
|
||||
shutdown = true;
|
||||
|
||||
if (processing_thread.joinable())
|
||||
processing_thread.join();
|
||||
}
|
||||
|
||||
try
|
||||
{
|
||||
{
|
||||
auto finish_watch = [] (const auto & watch_pair)
|
||||
{
|
||||
Coordination::ZooKeeperWatchResponse response;
|
||||
response.type = Coordination::SESSION;
|
||||
response.state = Coordination::EXPIRED_SESSION;
|
||||
response.error = Coordination::Error::ZSESSIONEXPIRED;
|
||||
|
||||
for (auto & watcher : watch_pair.second)
|
||||
{
|
||||
if (watcher.watch_callback)
|
||||
{
|
||||
try
|
||||
{
|
||||
watcher.watch_callback(std::make_shared<Coordination::ZooKeeperWatchResponse>(response));
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
for (auto & path_watch : watches)
|
||||
finish_watch(path_watch);
|
||||
watches.clear();
|
||||
for (auto & path_watch : list_watches)
|
||||
finish_watch(path_watch);
|
||||
list_watches.clear();
|
||||
sessions_and_watchers.clear();
|
||||
}
|
||||
RequestInfo info;
|
||||
while (requests_queue.tryPop(info))
|
||||
{
|
||||
auto response = info.request->zk_request->makeResponse();
|
||||
response->error = Coordination::Error::ZSESSIONEXPIRED;
|
||||
try
|
||||
{
|
||||
info.response_callback(response);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TestKeeperWrapperFactory final : private boost::noncopyable
|
||||
{
|
||||
|
||||
public:
|
||||
using Creator = std::function<TestKeeperStorageRequestPtr(const Coordination::ZooKeeperRequestPtr &)>;
|
||||
using OpNumToRequest = std::unordered_map<Coordination::OpNum, Creator>;
|
||||
|
||||
static TestKeeperWrapperFactory & instance()
|
||||
{
|
||||
static TestKeeperWrapperFactory factory;
|
||||
return factory;
|
||||
}
|
||||
|
||||
TestKeeperStorageRequestPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const
|
||||
{
|
||||
auto it = op_num_to_request.find(zk_request->getOpNum());
|
||||
if (it == op_num_to_request.end())
|
||||
throw DB::Exception("Unknown operation type " + toString(zk_request->getOpNum()), ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
return it->second(zk_request);
|
||||
}
|
||||
|
||||
void registerRequest(Coordination::OpNum op_num, Creator creator)
|
||||
{
|
||||
if (!op_num_to_request.try_emplace(op_num, creator).second)
|
||||
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Request with op num {} already registered", op_num);
|
||||
}
|
||||
|
||||
private:
|
||||
OpNumToRequest op_num_to_request;
|
||||
TestKeeperWrapperFactory();
|
||||
};
|
||||
|
||||
template<Coordination::OpNum num, typename RequestT>
|
||||
void registerTestKeeperRequestWrapper(TestKeeperWrapperFactory & factory)
|
||||
{
|
||||
factory.registerRequest(num, [] (const Coordination::ZooKeeperRequestPtr & zk_request) { return std::make_shared<RequestT>(zk_request); });
|
||||
}
|
||||
|
||||
|
||||
TestKeeperWrapperFactory::TestKeeperWrapperFactory()
|
||||
{
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Heartbeat, TestKeeperStorageHeartbeatRequest>(*this);
|
||||
//registerTestKeeperRequestWrapper<Coordination::OpNum::Auth, TestKeeperStorageAuthRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Close, TestKeeperStorageCloseRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Create, TestKeeperStorageCreateRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Remove, TestKeeperStorageRemoveRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Exists, TestKeeperStorageExistsRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Get, TestKeeperStorageGetRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Set, TestKeeperStorageSetRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::List, TestKeeperStorageListRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::SimpleList, TestKeeperStorageListRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Check, TestKeeperStorageCheckRequest>(*this);
|
||||
registerTestKeeperRequestWrapper<Coordination::OpNum::Multi, TestKeeperStorageMultiRequest>(*this);
|
||||
}
|
||||
|
||||
void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback)
|
||||
{
|
||||
TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
|
||||
RequestInfo request_info;
|
||||
request_info.time = clock::now();
|
||||
request_info.request = storage_request;
|
||||
request_info.session_id = session_id;
|
||||
request_info.response_callback = callback;
|
||||
|
||||
/// Put close requests without timeouts
|
||||
auto timeout = request->getOpNum() == Coordination::OpNum::Close ? 0 : operation_timeout.totalMilliseconds();
|
||||
std::lock_guard lock(push_request_mutex);
|
||||
if (!requests_queue.tryPush(std::move(request_info), timeout))
|
||||
throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
}
|
||||
|
||||
void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback)
|
||||
{
|
||||
TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
|
||||
RequestInfo request_info;
|
||||
request_info.time = clock::now();
|
||||
request_info.request = storage_request;
|
||||
request_info.session_id = session_id;
|
||||
request_info.response_callback = callback;
|
||||
if (request->has_watch)
|
||||
request_info.watch_callback = watch_callback;
|
||||
|
||||
/// Put close requests without timeouts
|
||||
auto timeout = request->getOpNum() == Coordination::OpNum::Close ? 0 : operation_timeout.totalMilliseconds();
|
||||
std::lock_guard lock(push_request_mutex);
|
||||
if (!requests_queue.tryPush(std::move(request_info), timeout))
|
||||
throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
|
||||
}
|
||||
|
||||
TestKeeperStorage::~TestKeeperStorage()
|
||||
{
|
||||
try
|
||||
{
|
||||
finalize();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
}
|
||||
}
|
||||
|
||||
void TestKeeperStorage::clearDeadWatches(int64_t session_id)
|
||||
{
|
||||
auto watches_it = sessions_and_watchers.find(session_id);
|
||||
if (watches_it != sessions_and_watchers.end())
|
||||
{
|
||||
for (const auto & watch_path : watches_it->second)
|
||||
{
|
||||
auto watch = watches.find(watch_path);
|
||||
if (watch != watches.end())
|
||||
{
|
||||
auto & watches_for_path = watch->second;
|
||||
for (auto w_it = watches_for_path.begin(); w_it != watches_for_path.end();)
|
||||
{
|
||||
if (w_it->session_id == session_id)
|
||||
w_it = watches_for_path.erase(w_it);
|
||||
else
|
||||
++w_it;
|
||||
}
|
||||
if (watches_for_path.empty())
|
||||
watches.erase(watch);
|
||||
}
|
||||
}
|
||||
sessions_and_watchers.erase(watches_it);
|
||||
}
|
||||
}
|
||||
|
||||
}
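A hedged usage sketch of the request path implemented above: build a concrete ZooKeeper request, hand it to putRequest() with a response callback, and let the processing thread fulfil it. The surrounding setup is illustrative and not taken from the repository's tests; it assumes the declarations from TestKeeperStorage.h plus <future>.

zkutil::TestKeeperStorage storage;                 /// the constructor is expected to start the processing thread
int64_t session_id = storage.getSessionID();

auto request = std::make_shared<Coordination::ZooKeeperGetRequest>();
request->path = "/some/node";

auto promise = std::make_shared<std::promise<Coordination::ZooKeeperResponsePtr>>();
auto future = promise->get_future();

storage.putRequest(request, session_id, [promise] (const Coordination::ZooKeeperResponsePtr & response)
{
    promise->set_value(response);                  /// invoked from the processing thread
});

Coordination::ZooKeeperResponsePtr response = future.get();
/// response->error will be ZNONODE here unless "/some/node" was created first.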
|
104
src/Common/ZooKeeper/TestKeeperStorage.h
Normal file
@ -0,0 +1,104 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ConcurrentBoundedQueue.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperCommon.h>
|
||||
#include <future>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
namespace zkutil
|
||||
{
|
||||
|
||||
using namespace DB;
|
||||
struct TestKeeperStorageRequest;
|
||||
using TestKeeperStorageRequestPtr = std::shared_ptr<TestKeeperStorageRequest>;
|
||||
using ResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr &)>;
|
||||
|
||||
class TestKeeperStorage
|
||||
{
|
||||
|
||||
public:
|
||||
|
||||
Poco::Timespan operation_timeout{0, Coordination::DEFAULT_OPERATION_TIMEOUT_MS * 1000};
|
||||
std::atomic<int64_t> session_id_counter{0};
|
||||
|
||||
struct Node
|
||||
{
|
||||
String data;
|
||||
Coordination::ACLs acls;
|
||||
bool is_ephemeral = false;
|
||||
bool is_sequental = false;
|
||||
Coordination::Stat stat{};
|
||||
int32_t seq_num = 0;
|
||||
};
|
||||
|
||||
struct Watcher
|
||||
{
|
||||
int64_t session_id;
|
||||
ResponseCallback watch_callback;
|
||||
};
|
||||
|
||||
using Container = std::map<std::string, Node>;
|
||||
using Ephemerals = std::unordered_map<int64_t, std::unordered_set<String>>;
|
||||
using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<String>>;
|
||||
|
||||
using WatchCallbacks = std::vector<Watcher>;
|
||||
using Watches = std::map<String /* path, relative to root_path */, WatchCallbacks>;
|
||||
|
||||
Container container;
|
||||
Ephemerals ephemerals;
|
||||
SessionAndWatcher sessions_and_watchers;
|
||||
|
||||
std::atomic<int64_t> zxid{0};
|
||||
std::atomic<bool> shutdown{false};
|
||||
|
||||
Watches watches;
|
||||
Watches list_watches; /// Watches for 'list' request (watches on children).
|
||||
|
||||
using clock = std::chrono::steady_clock;
|
||||
|
||||
struct RequestInfo
|
||||
{
|
||||
TestKeeperStorageRequestPtr request;
|
||||
ResponseCallback response_callback;
|
||||
ResponseCallback watch_callback;
|
||||
clock::time_point time;
|
||||
int64_t session_id;
|
||||
};
|
||||
|
||||
std::mutex push_request_mutex;
|
||||
using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
|
||||
RequestsQueue requests_queue{1};
|
||||
|
||||
void finalize();
|
||||
|
||||
ThreadFromGlobalPool processing_thread;
|
||||
|
||||
void processingThread();
|
||||
void clearDeadWatches(int64_t session_id);
|
||||
|
||||
public:
|
||||
using AsyncResponse = std::future<Coordination::ZooKeeperResponsePtr>;
|
||||
TestKeeperStorage();
|
||||
~TestKeeperStorage();
|
||||
struct ResponsePair
|
||||
{
|
||||
AsyncResponse response;
|
||||
std::optional<AsyncResponse> watch_response;
|
||||
};
|
||||
void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback);
|
||||
void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback);
|
||||
|
||||
int64_t getSessionID()
|
||||
{
|
||||
return session_id_counter.fetch_add(1);
|
||||
}
|
||||
int64_t getZXID()
|
||||
{
|
||||
return zxid.fetch_add(1);
|
||||
}
|
||||
};
|
||||
|
||||
}
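All requests are funnelled through a ConcurrentBoundedQueue of capacity 1, so a producer blocks (up to the operation timeout) until the processing thread has taken the previous request. A hedged sketch of that handoff using the same queue primitive; the capacity, payload type and timeouts here are illustrative.

ConcurrentBoundedQueue<int> queue(1);

/// Producer: give up after the timeout instead of blocking forever.
if (!queue.tryPush(42, /* milliseconds = */ 100))
    throw std::runtime_error("queue is full");     /// illustrative error handling

/// Consumer (normally another thread): wait up to the timeout for work.
int item = 0;
if (queue.tryPop(item, /* milliseconds = */ 100))
{
    /// handle item here
}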
|
@ -129,8 +129,8 @@ struct ZooKeeperArgs
|
||||
|
||||
std::vector<std::string> hosts_strings;
|
||||
|
||||
session_timeout_ms = DEFAULT_SESSION_TIMEOUT;
|
||||
operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT;
|
||||
session_timeout_ms = Coordination::DEFAULT_SESSION_TIMEOUT_MS;
|
||||
operation_timeout_ms = Coordination::DEFAULT_OPERATION_TIMEOUT_MS;
|
||||
implementation = "zookeeper";
|
||||
for (const auto & key : keys)
|
||||
{
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperConstants.h>
|
||||
#include <unistd.h>
|
||||
|
||||
|
||||
@ -28,9 +29,6 @@ namespace CurrentMetrics
|
||||
namespace zkutil
|
||||
{
|
||||
|
||||
const UInt32 DEFAULT_SESSION_TIMEOUT = 30000;
|
||||
const UInt32 DEFAULT_OPERATION_TIMEOUT = 10000;
|
||||
|
||||
/// Preferred size of multi() command (in number of ops)
|
||||
constexpr size_t MULTI_BATCH_SIZE = 100;
|
||||
|
||||
@ -53,8 +51,8 @@ public:
|
||||
using Ptr = std::shared_ptr<ZooKeeper>;
|
||||
|
||||
ZooKeeper(const std::string & hosts_, const std::string & identity_ = "",
|
||||
int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT,
|
||||
int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT,
|
||||
int32_t session_timeout_ms_ = Coordination::DEFAULT_SESSION_TIMEOUT_MS,
|
||||
int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS,
|
||||
const std::string & chroot_ = "",
|
||||
const std::string & implementation_ = "zookeeper");
|
||||
|
||||
|
481
src/Common/ZooKeeper/ZooKeeperCommon.cpp
Normal file
@ -0,0 +1,481 @@
|
||||
#include <Common/ZooKeeper/ZooKeeperCommon.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperIO.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <common/logger_useful.h>
|
||||
#include <array>
|
||||
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
using namespace DB;
|
||||
|
||||
void ZooKeeperResponse::write(WriteBuffer & out) const
|
||||
{
|
||||
/// Excessive copy to calculate length.
|
||||
WriteBufferFromOwnString buf;
|
||||
Coordination::write(xid, buf);
|
||||
Coordination::write(zxid, buf);
|
||||
Coordination::write(error, buf);
|
||||
if (error == Error::ZOK)
|
||||
writeImpl(buf);
|
||||
Coordination::write(buf.str(), out);
|
||||
out.next();
|
||||
}
|
||||
|
||||
void ZooKeeperRequest::write(WriteBuffer & out) const
|
||||
{
|
||||
/// Excessive copy to calculate length.
|
||||
WriteBufferFromOwnString buf;
|
||||
Coordination::write(xid, buf);
|
||||
Coordination::write(getOpNum(), buf);
|
||||
writeImpl(buf);
|
||||
Coordination::write(buf.str(), out);
|
||||
out.next();
|
||||
}
|
||||
|
||||
void ZooKeeperWatchResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(type, in);
|
||||
Coordination::read(state, in);
|
||||
Coordination::read(path, in);
|
||||
}
|
||||
|
||||
void ZooKeeperWatchResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(type, out);
|
||||
Coordination::write(state, out);
|
||||
Coordination::write(path, out);
|
||||
}
|
||||
|
||||
void ZooKeeperAuthRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(type, out);
|
||||
Coordination::write(scheme, out);
|
||||
Coordination::write(data, out);
|
||||
}
|
||||
|
||||
void ZooKeeperAuthRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(type, in);
|
||||
Coordination::read(scheme, in);
|
||||
Coordination::read(data, in);
|
||||
}
|
||||
|
||||
void ZooKeeperCreateRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(data, out);
|
||||
Coordination::write(acls, out);
|
||||
|
||||
int32_t flags = 0;
|
||||
|
||||
if (is_ephemeral)
|
||||
flags |= 1;
|
||||
if (is_sequential)
|
||||
flags |= 2;
|
||||
|
||||
Coordination::write(flags, out);
|
||||
}
|
||||
|
||||
void ZooKeeperCreateRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(data, in);
|
||||
Coordination::read(acls, in);
|
||||
|
||||
int32_t flags = 0;
|
||||
Coordination::read(flags, in);
|
||||
|
||||
if (flags & 1)
|
||||
is_ephemeral = true;
|
||||
if (flags & 2)
|
||||
is_sequential = true;
|
||||
}
|
||||
|
||||
void ZooKeeperCreateResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path_created, in);
|
||||
}
|
||||
|
||||
void ZooKeeperCreateResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path_created, out);
|
||||
}
|
||||
|
||||
void ZooKeeperRemoveRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(version, out);
|
||||
}
|
||||
|
||||
void ZooKeeperRemoveRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(version, in);
|
||||
}
|
||||
|
||||
void ZooKeeperExistsRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(has_watch, out);
|
||||
}
|
||||
|
||||
void ZooKeeperExistsRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(has_watch, in);
|
||||
}
|
||||
|
||||
void ZooKeeperExistsResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(stat, in);
|
||||
}
|
||||
|
||||
void ZooKeeperExistsResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(stat, out);
|
||||
}
|
||||
|
||||
void ZooKeeperGetRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(has_watch, out);
|
||||
}
|
||||
|
||||
void ZooKeeperGetRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(has_watch, in);
|
||||
}
|
||||
|
||||
void ZooKeeperGetResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(data, in);
|
||||
Coordination::read(stat, in);
|
||||
}
|
||||
|
||||
void ZooKeeperGetResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(data, out);
|
||||
Coordination::write(stat, out);
|
||||
}
|
||||
|
||||
void ZooKeeperSetRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(data, out);
|
||||
Coordination::write(version, out);
|
||||
}
|
||||
|
||||
void ZooKeeperSetRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(data, in);
|
||||
Coordination::read(version, in);
|
||||
}
|
||||
|
||||
void ZooKeeperSetResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(stat, in);
|
||||
}
|
||||
|
||||
void ZooKeeperSetResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(stat, out);
|
||||
}
|
||||
|
||||
void ZooKeeperListRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(has_watch, out);
|
||||
}
|
||||
|
||||
void ZooKeeperListRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(has_watch, in);
|
||||
}
|
||||
|
||||
void ZooKeeperListResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(names, in);
|
||||
Coordination::read(stat, in);
|
||||
}
|
||||
|
||||
void ZooKeeperListResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(names, out);
|
||||
Coordination::write(stat, out);
|
||||
}
|
||||
|
||||
void ZooKeeperCheckRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(path, out);
|
||||
Coordination::write(version, out);
|
||||
}
|
||||
|
||||
void ZooKeeperCheckRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::read(path, in);
|
||||
Coordination::read(version, in);
|
||||
}
|
||||
|
||||
void ZooKeeperErrorResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
Coordination::Error read_error;
|
||||
Coordination::read(read_error, in);
|
||||
|
||||
if (read_error != error)
|
||||
throw Exception(fmt::format("Error code in ErrorResponse ({}) doesn't match error code in header ({})", read_error, error),
|
||||
Error::ZMARSHALLINGERROR);
|
||||
}
|
||||
|
||||
void ZooKeeperErrorResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
Coordination::write(error, out);
|
||||
}
|
||||
|
||||
ZooKeeperMultiRequest::ZooKeeperMultiRequest(const Requests & generic_requests, const ACLs & default_acls)
|
||||
{
|
||||
/// Convert nested Requests to ZooKeeperRequests.
|
||||
/// Note that a deep copy is required to avoid modifying the path in the presence of a chroot prefix.
|
||||
requests.reserve(generic_requests.size());
|
||||
|
||||
for (const auto & generic_request : generic_requests)
|
||||
{
|
||||
if (const auto * concrete_request_create = dynamic_cast<const CreateRequest *>(generic_request.get()))
|
||||
{
|
||||
auto create = std::make_shared<ZooKeeperCreateRequest>(*concrete_request_create);
|
||||
if (create->acls.empty())
|
||||
create->acls = default_acls;
|
||||
requests.push_back(create);
|
||||
}
|
||||
else if (const auto * concrete_request_remove = dynamic_cast<const RemoveRequest *>(generic_request.get()))
|
||||
{
|
||||
requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request_remove));
|
||||
}
|
||||
else if (const auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
|
||||
{
|
||||
requests.push_back(std::make_shared<ZooKeeperSetRequest>(*concrete_request_set));
|
||||
}
|
||||
else if (const auto * concrete_request_check = dynamic_cast<const CheckRequest *>(generic_request.get()))
|
||||
{
|
||||
requests.push_back(std::make_shared<ZooKeeperCheckRequest>(*concrete_request_check));
|
||||
}
|
||||
else
|
||||
throw Exception("Illegal command as part of multi ZooKeeper request", Error::ZBADARGUMENTS);
|
||||
}
|
||||
}
|
||||
|
||||
void ZooKeeperMultiRequest::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
for (const auto & request : requests)
|
||||
{
|
||||
const auto & zk_request = dynamic_cast<const ZooKeeperRequest &>(*request);
|
||||
|
||||
bool done = false;
|
||||
int32_t error = -1;
|
||||
|
||||
Coordination::write(zk_request.getOpNum(), out);
|
||||
Coordination::write(done, out);
|
||||
Coordination::write(error, out);
|
||||
|
||||
zk_request.writeImpl(out);
|
||||
}
|
||||
|
||||
OpNum op_num = OpNum::Error;
|
||||
bool done = true;
|
||||
int32_t error = -1;
|
||||
|
||||
Coordination::write(op_num, out);
|
||||
Coordination::write(done, out);
|
||||
Coordination::write(error, out);
|
||||
}
|
||||
|
||||
void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
|
||||
{
|
||||
|
||||
while (true)
|
||||
{
|
||||
OpNum op_num;
|
||||
bool done;
|
||||
int32_t error;
|
||||
Coordination::read(op_num, in);
|
||||
Coordination::read(done, in);
|
||||
Coordination::read(error, in);
|
||||
|
||||
if (done)
|
||||
{
|
||||
if (op_num != OpNum::Error)
|
||||
throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
if (error != -1)
|
||||
throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
break;
|
||||
}
|
||||
|
||||
ZooKeeperRequestPtr request = ZooKeeperRequestFactory::instance().get(op_num);
|
||||
request->readImpl(in);
|
||||
requests.push_back(request);
|
||||
|
||||
if (in.eof())
|
||||
throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
}
|
||||
}
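A hedged round-trip sketch of the multi framing above: every sub-operation is prefixed with (op_num, done = false, error), and the stream ends with a footer of (OpNum::Error, done = true, error = -1). The buffers are the usual ClickHouse in-memory WriteBufferFromOwnString / ReadBufferFromString; the snippet is illustrative, not taken from the tests.

Coordination::Requests ops;
auto create = std::make_shared<Coordination::ZooKeeperCreateRequest>();
create->path = "/a";
create->data = "payload";
ops.push_back(create);

Coordination::ZooKeeperMultiRequest multi(ops, /* default_acls = */ {});

DB::WriteBufferFromOwnString out;
multi.writeImpl(out);                       /// per-op frames plus the terminating footer

DB::ReadBufferFromString in(out.str());
Coordination::ZooKeeperMultiRequest parsed;
parsed.readImpl(in);                        /// reads sub-requests until the footer is reached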
|
||||
|
||||
void ZooKeeperMultiResponse::readImpl(ReadBuffer & in)
|
||||
{
|
||||
for (auto & response : responses)
|
||||
{
|
||||
OpNum op_num;
|
||||
bool done;
|
||||
Error op_error;
|
||||
|
||||
Coordination::read(op_num, in);
|
||||
Coordination::read(done, in);
|
||||
Coordination::read(op_error, in);
|
||||
|
||||
if (done)
|
||||
throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
|
||||
/// op_num == -1 is special for multi transaction.
|
||||
/// For an unknown reason, the error code is duplicated in the header and in the response body.
|
||||
|
||||
if (op_num == OpNum::Error)
|
||||
response = std::make_shared<ZooKeeperErrorResponse>();
|
||||
|
||||
if (op_error != Error::ZOK)
|
||||
{
|
||||
response->error = op_error;
|
||||
|
||||
/// Set error for whole transaction.
|
||||
/// If some operations fail, ZooKeeper sends the global error as zero and then sends details for each operation.
|
||||
/// It sets the error code of the first failed operation, and the special "runtime inconsistency" code for the other operations.
|
||||
if (error == Error::ZOK && op_error != Error::ZRUNTIMEINCONSISTENCY)
|
||||
error = op_error;
|
||||
}
|
||||
|
||||
if (op_error == Error::ZOK || op_num == OpNum::Error)
|
||||
dynamic_cast<ZooKeeperResponse &>(*response).readImpl(in);
|
||||
}
|
||||
|
||||
/// Footer.
|
||||
{
|
||||
OpNum op_num;
|
||||
bool done;
|
||||
int32_t error_read;
|
||||
|
||||
Coordination::read(op_num, in);
|
||||
Coordination::read(done, in);
|
||||
Coordination::read(error_read, in);
|
||||
|
||||
if (!done)
|
||||
throw Exception("Too many results received for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
if (op_num != OpNum::Error)
|
||||
throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
if (error_read != -1)
|
||||
throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
|
||||
}
|
||||
}
|
||||
|
||||
void ZooKeeperMultiResponse::writeImpl(WriteBuffer & out) const
|
||||
{
|
||||
for (const auto & response : responses)
|
||||
{
|
||||
const ZooKeeperResponse & zk_response = dynamic_cast<const ZooKeeperResponse &>(*response);
|
||||
OpNum op_num = zk_response.getOpNum();
|
||||
bool done = false;
|
||||
Error op_error = zk_response.error;
|
||||
|
||||
Coordination::write(op_num, out);
|
||||
Coordination::write(done, out);
|
||||
Coordination::write(op_error, out);
|
||||
if (op_error == Error::ZOK || op_num == OpNum::Error)
|
||||
zk_response.writeImpl(out);
|
||||
}
|
||||
|
||||
/// Footer.
|
||||
{
|
||||
OpNum op_num = OpNum::Error;
|
||||
bool done = true;
|
||||
int32_t error_read = -1;
|
||||
|
||||
Coordination::write(op_num, out);
|
||||
Coordination::write(done, out);
|
||||
Coordination::write(error_read, out);
|
||||
}
|
||||
}
|
||||
|
||||
ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return std::make_shared<ZooKeeperHeartbeatResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return std::make_shared<ZooKeeperAuthResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const { return std::make_shared<ZooKeeperCreateResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return std::make_shared<ZooKeeperRemoveResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return std::make_shared<ZooKeeperExistsResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return std::make_shared<ZooKeeperGetResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return std::make_shared<ZooKeeperSetResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::make_shared<ZooKeeperListResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::make_shared<ZooKeeperCheckResponse>(); }
|
||||
ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared<ZooKeeperMultiResponse>(requests); }
|
||||
ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared<ZooKeeperCloseResponse>(); }
|
||||
|
||||
void ZooKeeperRequestFactory::registerRequest(OpNum op_num, Creator creator)
|
||||
{
|
||||
if (!op_num_to_request.try_emplace(op_num, creator).second)
|
||||
throw Coordination::Exception("Request type " + toString(op_num) + " already registered", Coordination::Error::ZRUNTIMEINCONSISTENCY);
|
||||
}
|
||||
|
||||
std::shared_ptr<ZooKeeperRequest> ZooKeeperRequest::read(ReadBuffer & in)
|
||||
{
|
||||
XID xid;
|
||||
OpNum op_num;
|
||||
|
||||
Coordination::read(xid, in);
|
||||
Coordination::read(op_num, in);
|
||||
|
||||
auto request = ZooKeeperRequestFactory::instance().get(op_num);
|
||||
request->xid = xid;
|
||||
request->readImpl(in);
|
||||
return request;
|
||||
}
|
||||
|
||||
ZooKeeperRequestPtr ZooKeeperRequestFactory::get(OpNum op_num) const
|
||||
{
|
||||
auto it = op_num_to_request.find(op_num);
|
||||
if (it == op_num_to_request.end())
|
||||
throw Exception("Unknown operation type " + toString(op_num), Error::ZBADARGUMENTS);
|
||||
|
||||
return it->second();
|
||||
}
|
||||
|
||||
ZooKeeperRequestFactory & ZooKeeperRequestFactory::instance()
|
||||
{
|
||||
static ZooKeeperRequestFactory factory;
|
||||
return factory;
|
||||
}
|
||||
|
||||
template<OpNum num, typename RequestT>
|
||||
void registerZooKeeperRequest(ZooKeeperRequestFactory & factory)
|
||||
{
|
||||
factory.registerRequest(num, [] { return std::make_shared<RequestT>(); });
|
||||
}
|
||||
|
||||
ZooKeeperRequestFactory::ZooKeeperRequestFactory()
|
||||
{
|
||||
registerZooKeeperRequest<OpNum::Heartbeat, ZooKeeperHeartbeatRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Auth, ZooKeeperAuthRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Close, ZooKeeperCloseRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Create, ZooKeeperCreateRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Remove, ZooKeeperRemoveRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Exists, ZooKeeperExistsRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Get, ZooKeeperGetRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Set, ZooKeeperSetRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::SimpleList, ZooKeeperSimpleListRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::List, ZooKeeperListRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Check, ZooKeeperCheckRequest>(*this);
|
||||
registerZooKeeperRequest<OpNum::Multi, ZooKeeperMultiRequest>(*this);
|
||||
}
|
||||
|
||||
}
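A hedged sketch of a full request round trip through the helpers above. ZooKeeperRequest::write() produces a length-prefixed frame (length, xid, op_num, body), while the static ZooKeeperRequest::read() expects the caller to have consumed the length already and dispatches on op_num via ZooKeeperRequestFactory. The buffer classes and includes are assumed, not shown in this commit.

auto request = std::make_shared<Coordination::ZooKeeperCreateRequest>();
request->xid = 1;
request->path = "/example";
request->data = "payload";

DB::WriteBufferFromOwnString out;
request->write(out);                        /// length, xid, op_num, then the body

DB::ReadBufferFromString in(out.str());
int32_t length = 0;
Coordination::read(length, in);             /// consume the length prefix first
auto parsed = Coordination::ZooKeeperRequest::read(in);
/// parsed->getOpNum() == Coordination::OpNum::Create, parsed->xid == 1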
|
338
src/Common/ZooKeeper/ZooKeeperCommon.h
Normal file
@ -0,0 +1,338 @@
|
||||
#pragma once
|
||||
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperConstants.h>
|
||||
|
||||
#include <boost/noncopyable.hpp>
|
||||
#include <IO/ReadBuffer.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <map>
|
||||
#include <unordered_map>
|
||||
#include <mutex>
|
||||
#include <chrono>
|
||||
#include <vector>
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <functional>
|
||||
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
struct ZooKeeperResponse : virtual Response
|
||||
{
|
||||
XID xid = 0;
|
||||
int64_t zxid = 0;
|
||||
|
||||
virtual ~ZooKeeperResponse() override = default;
|
||||
virtual void readImpl(ReadBuffer &) = 0;
|
||||
virtual void writeImpl(WriteBuffer &) const = 0;
|
||||
void write(WriteBuffer & out) const;
|
||||
virtual OpNum getOpNum() const = 0;
|
||||
};
|
||||
|
||||
using ZooKeeperResponsePtr = std::shared_ptr<ZooKeeperResponse>;
|
||||
|
||||
/// Exposed in header file for Yandex.Metrica code.
|
||||
struct ZooKeeperRequest : virtual Request
|
||||
{
|
||||
XID xid = 0;
|
||||
bool has_watch = false;
|
||||
/// If the request was not sent and an error happened, we are definitely sure that it has not been processed by the server.
|
||||
/// If the request was sent but we didn't get a response before the error happened, we cannot be sure whether it was processed or not.
|
||||
bool probably_sent = false;
|
||||
|
||||
ZooKeeperRequest() = default;
|
||||
ZooKeeperRequest(const ZooKeeperRequest &) = default;
|
||||
virtual ~ZooKeeperRequest() override = default;
|
||||
|
||||
virtual OpNum getOpNum() const = 0;
|
||||
|
||||
/// Writes length, xid, op_num, then the rest.
|
||||
void write(WriteBuffer & out) const;
|
||||
|
||||
virtual void writeImpl(WriteBuffer &) const = 0;
|
||||
virtual void readImpl(ReadBuffer &) = 0;
|
||||
|
||||
static std::shared_ptr<ZooKeeperRequest> read(ReadBuffer & in);
|
||||
|
||||
virtual ZooKeeperResponsePtr makeResponse() const = 0;
|
||||
};
|
||||
|
||||
using ZooKeeperRequestPtr = std::shared_ptr<ZooKeeperRequest>;
|
||||
|
||||
struct ZooKeeperHeartbeatRequest final : ZooKeeperRequest
|
||||
{
|
||||
String getPath() const override { return {}; }
|
||||
OpNum getOpNum() const override { return OpNum::Heartbeat; }
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
OpNum getOpNum() const override { return OpNum::Heartbeat; }
|
||||
};
|
||||
|
||||
struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
|
||||
OpNum getOpNum() const override
|
||||
{
|
||||
throw Exception("OpNum for watch response doesn't exist", Error::ZRUNTIMEINCONSISTENCY);
|
||||
}
|
||||
};
|
||||
|
||||
struct ZooKeeperAuthRequest final : ZooKeeperRequest
|
||||
{
|
||||
int32_t type = 0; /// ignored by the server
|
||||
String scheme;
|
||||
String data;
|
||||
|
||||
String getPath() const override { return {}; }
|
||||
OpNum getOpNum() const override { return OpNum::Auth; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperAuthResponse final : ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Auth; }
|
||||
};
|
||||
|
||||
struct ZooKeeperCloseRequest final : ZooKeeperRequest
|
||||
{
|
||||
String getPath() const override { return {}; }
|
||||
OpNum getOpNum() const override { return OpNum::Close; }
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperCloseResponse final : ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer &) override
|
||||
{
|
||||
throw Exception("Received response for close request", Error::ZRUNTIMEINCONSISTENCY);
|
||||
}
|
||||
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Close; }
|
||||
};
|
||||
|
||||
struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
|
||||
{
|
||||
ZooKeeperCreateRequest() = default;
|
||||
explicit ZooKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Create; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Create; }
|
||||
};
|
||||
|
||||
struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
|
||||
{
|
||||
ZooKeeperRemoveRequest() = default;
|
||||
explicit ZooKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Remove; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
OpNum getOpNum() const override { return OpNum::Remove; }
|
||||
};
|
||||
|
||||
struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::Exists; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
OpNum getOpNum() const override { return OpNum::Exists; }
|
||||
};
|
||||
|
||||
struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::Get; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
OpNum getOpNum() const override { return OpNum::Get; }
|
||||
};
|
||||
|
||||
struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
|
||||
{
|
||||
ZooKeeperSetRequest() = default;
|
||||
explicit ZooKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Set; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
OpNum getOpNum() const override { return OpNum::Set; }
|
||||
};
|
||||
|
||||
struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::List; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperSimpleListRequest final : ZooKeeperListRequest
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::SimpleList; }
|
||||
};
|
||||
|
||||
struct ZooKeeperListResponse : ListResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
OpNum getOpNum() const override { return OpNum::List; }
|
||||
};
|
||||
|
||||
struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::SimpleList; }
|
||||
};
|
||||
|
||||
struct ZooKeeperCheckRequest final : CheckRequest, ZooKeeperRequest
|
||||
{
|
||||
ZooKeeperCheckRequest() = default;
|
||||
explicit ZooKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {}
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Check; }
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer &) override {}
|
||||
void writeImpl(WriteBuffer &) const override {}
|
||||
OpNum getOpNum() const override { return OpNum::Check; }
|
||||
};
|
||||
|
||||
/// This response may be received only as an element of responses in MultiResponse.
|
||||
struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse
|
||||
{
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
|
||||
OpNum getOpNum() const override { return OpNum::Error; }
|
||||
};
|
||||
|
||||
struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::Multi; }
|
||||
ZooKeeperMultiRequest() = default;
|
||||
|
||||
ZooKeeperMultiRequest(const Requests & generic_requests, const ACLs & default_acls);
|
||||
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
ZooKeeperResponsePtr makeResponse() const override;
|
||||
};
|
||||
|
||||
struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
||||
{
|
||||
OpNum getOpNum() const override { return OpNum::Multi; }
|
||||
|
||||
explicit ZooKeeperMultiResponse(const Requests & requests)
|
||||
{
|
||||
responses.reserve(requests.size());
|
||||
|
||||
for (const auto & request : requests)
|
||||
responses.emplace_back(dynamic_cast<const ZooKeeperRequest &>(*request).makeResponse());
|
||||
}
|
||||
|
||||
explicit ZooKeeperMultiResponse(const Responses & responses_)
|
||||
{
|
||||
responses = responses_;
|
||||
}
|
||||
|
||||
void readImpl(ReadBuffer & in) override;
|
||||
|
||||
void writeImpl(WriteBuffer & out) const override;
|
||||
|
||||
};
|
||||
|
||||
class ZooKeeperRequestFactory final : private boost::noncopyable
|
||||
{
|
||||
|
||||
public:
|
||||
using Creator = std::function<ZooKeeperRequestPtr()>;
|
||||
using OpNumToRequest = std::unordered_map<OpNum, Creator>;
|
||||
|
||||
static ZooKeeperRequestFactory & instance();
|
||||
|
||||
ZooKeeperRequestPtr get(OpNum op_num) const;
|
||||
|
||||
void registerRequest(OpNum op_num, Creator creator);
|
||||
|
||||
private:
|
||||
OpNumToRequest op_num_to_request;
|
||||
|
||||
private:
|
||||
ZooKeeperRequestFactory();
|
||||
};
|
||||
|
||||
}
|
67
src/Common/ZooKeeper/ZooKeeperConstants.cpp
Normal file
@ -0,0 +1,67 @@
|
||||
#include <Common/ZooKeeper/ZooKeeperConstants.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <unordered_set>
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
static const std::unordered_set<int32_t> VALID_OPERATIONS =
|
||||
{
|
||||
static_cast<int32_t>(OpNum::Close),
|
||||
static_cast<int32_t>(OpNum::Error),
|
||||
static_cast<int32_t>(OpNum::Create),
|
||||
static_cast<int32_t>(OpNum::Remove),
|
||||
static_cast<int32_t>(OpNum::Exists),
|
||||
static_cast<int32_t>(OpNum::Get),
|
||||
static_cast<int32_t>(OpNum::Set),
|
||||
static_cast<int32_t>(OpNum::SimpleList),
|
||||
static_cast<int32_t>(OpNum::Heartbeat),
|
||||
static_cast<int32_t>(OpNum::List),
|
||||
static_cast<int32_t>(OpNum::Check),
|
||||
static_cast<int32_t>(OpNum::Multi),
|
||||
static_cast<int32_t>(OpNum::Auth),
|
||||
};
|
||||
|
||||
std::string toString(OpNum op_num)
|
||||
{
|
||||
switch (op_num)
|
||||
{
|
||||
case OpNum::Close:
|
||||
return "Close";
|
||||
case OpNum::Error:
|
||||
return "Error";
|
||||
case OpNum::Create:
|
||||
return "Create";
|
||||
case OpNum::Remove:
|
||||
return "Remove";
|
||||
case OpNum::Exists:
|
||||
return "Exists";
|
||||
case OpNum::Get:
|
||||
return "Get";
|
||||
case OpNum::Set:
|
||||
return "Set";
|
||||
case OpNum::SimpleList:
|
||||
return "SimpleList";
|
||||
case OpNum::List:
|
||||
return "List";
|
||||
case OpNum::Check:
|
||||
return "Check";
|
||||
case OpNum::Multi:
|
||||
return "Multi";
|
||||
case OpNum::Heartbeat:
|
||||
return "Heartbeat";
|
||||
case OpNum::Auth:
|
||||
return "Auth";
|
||||
}
|
||||
int32_t raw_op = static_cast<int32_t>(op_num);
|
||||
throw Exception("Operation " + std::to_string(raw_op) + " is unknown", Error::ZUNIMPLEMENTED);
|
||||
}
|
||||
|
||||
OpNum getOpNum(int32_t raw_op_num)
|
||||
{
|
||||
if (!VALID_OPERATIONS.count(raw_op_num))
|
||||
throw Exception("Operation " + std::to_string(raw_op_num) + " is unknown", Error::ZUNIMPLEMENTED);
|
||||
return static_cast<OpNum>(raw_op_num);
|
||||
}
|
||||
|
||||
}
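A hedged usage sketch of the helpers above: getOpNum() validates a raw wire value against VALID_OPERATIONS, and toString() maps the enum back to a name; unknown values throw ZUNIMPLEMENTED instead of yielding a bogus enum value.

int32_t raw_op_num = 14;                                            /// as read from the wire
Coordination::OpNum op_num = Coordination::getOpNum(raw_op_num);    /// OpNum::Multi
std::string name = Coordination::toString(op_num);                  /// "Multi"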
|
49
src/Common/ZooKeeper/ZooKeeperConstants.h
Normal file
@ -0,0 +1,49 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <cstdint>
|
||||
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
using XID = int32_t;
|
||||
|
||||
static constexpr XID WATCH_XID = -1;
|
||||
static constexpr XID PING_XID = -2;
|
||||
static constexpr XID AUTH_XID = -4;
|
||||
static constexpr XID CLOSE_XID = 0x7FFFFFFF;
|
||||
|
||||
enum class OpNum : int32_t
|
||||
{
|
||||
Close = -11,
|
||||
Error = -1,
|
||||
Create = 1,
|
||||
Remove = 2,
|
||||
Exists = 3,
|
||||
Get = 4,
|
||||
Set = 5,
|
||||
SimpleList = 8,
|
||||
Heartbeat = 11,
|
||||
List = 12,
|
||||
Check = 13,
|
||||
Multi = 14,
|
||||
Auth = 100,
|
||||
};
|
||||
|
||||
std::string toString(OpNum op_num);
|
||||
OpNum getOpNum(int32_t raw_op_num);
|
||||
|
||||
static constexpr int32_t ZOOKEEPER_PROTOCOL_VERSION = 0;
|
||||
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH = 44;
|
||||
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH_WITH_READONLY = 45;
|
||||
static constexpr int32_t SERVER_HANDSHAKE_LENGTH = 36;
|
||||
static constexpr int32_t PASSWORD_LENGTH = 16;
|
||||
|
||||
/// ZooKeeper has 1 MB node size and serialization limit by default,
|
||||
/// but it can be raised, so we have a slightly larger limit on our side.
|
||||
static constexpr int32_t MAX_STRING_OR_ARRAY_SIZE = 1 << 28; /// 256 MiB
|
||||
static constexpr int32_t DEFAULT_SESSION_TIMEOUT_MS = 30000;
|
||||
static constexpr int32_t DEFAULT_OPERATION_TIMEOUT_MS = 10000;
|
||||
|
||||
}
|
140
src/Common/ZooKeeper/ZooKeeperIO.cpp
Normal file
@ -0,0 +1,140 @@
|
||||
#include <Common/ZooKeeper/ZooKeeperIO.h>
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
void write(int64_t x, WriteBuffer & out)
|
||||
{
|
||||
x = __builtin_bswap64(x);
|
||||
writeBinary(x, out);
|
||||
}
|
||||
void write(int32_t x, WriteBuffer & out)
|
||||
{
|
||||
x = __builtin_bswap32(x);
|
||||
writeBinary(x, out);
|
||||
}
|
||||
|
||||
void write(OpNum x, WriteBuffer & out)
|
||||
{
|
||||
write(static_cast<int32_t>(x), out);
|
||||
}
|
||||
|
||||
void write(bool x, WriteBuffer & out)
|
||||
{
|
||||
writeBinary(x, out);
|
||||
}
|
||||
|
||||
void write(const std::string & s, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(s.size()), out);
|
||||
out.write(s.data(), s.size());
|
||||
}
|
||||
|
||||
void write(const ACL & acl, WriteBuffer & out)
|
||||
{
|
||||
write(acl.permissions, out);
|
||||
write(acl.scheme, out);
|
||||
write(acl.id, out);
|
||||
}
|
||||
|
||||
void write(const Stat & stat, WriteBuffer & out)
|
||||
{
|
||||
write(stat.czxid, out);
|
||||
write(stat.mzxid, out);
|
||||
write(stat.ctime, out);
|
||||
write(stat.mtime, out);
|
||||
write(stat.version, out);
|
||||
write(stat.cversion, out);
|
||||
write(stat.aversion, out);
|
||||
write(stat.ephemeralOwner, out);
|
||||
write(stat.dataLength, out);
|
||||
write(stat.numChildren, out);
|
||||
write(stat.pzxid, out);
|
||||
}
|
||||
|
||||
void write(const Error & x, WriteBuffer & out)
|
||||
{
|
||||
write(static_cast<int32_t>(x), out);
|
||||
}
|
||||
|
||||
void read(int64_t & x, ReadBuffer & in)
|
||||
{
|
||||
readBinary(x, in);
|
||||
x = __builtin_bswap64(x);
|
||||
}
|
||||
|
||||
void read(int32_t & x, ReadBuffer & in)
|
||||
{
|
||||
readBinary(x, in);
|
||||
x = __builtin_bswap32(x);
|
||||
}
|
||||
|
||||
void read(OpNum & x, ReadBuffer & in)
|
||||
{
|
||||
int32_t raw_op_num;
|
||||
read(raw_op_num, in);
|
||||
x = getOpNum(raw_op_num);
|
||||
}
|
||||
|
||||
void read(bool & x, ReadBuffer & in)
|
||||
{
|
||||
readBinary(x, in);
|
||||
}
|
||||
|
||||
void read(int8_t & x, ReadBuffer & in)
|
||||
{
|
||||
readBinary(x, in);
|
||||
}
|
||||
|
||||
void read(std::string & s, ReadBuffer & in)
|
||||
{
|
||||
int32_t size = 0;
|
||||
read(size, in);
|
||||
|
||||
if (size == -1)
|
||||
{
|
||||
/// It means that the ZooKeeper node has a NULL value. We will treat it as an empty string.
|
||||
s.clear();
|
||||
return;
|
||||
}
|
||||
|
||||
if (size < 0)
|
||||
throw Exception("Negative size while reading string from ZooKeeper", Error::ZMARSHALLINGERROR);
|
||||
|
||||
if (size > MAX_STRING_OR_ARRAY_SIZE)
|
||||
throw Exception("Too large string size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
|
||||
|
||||
s.resize(size);
|
||||
in.read(s.data(), size);
|
||||
}
|
||||
|
||||
void read(ACL & acl, ReadBuffer & in)
|
||||
{
|
||||
read(acl.permissions, in);
|
||||
read(acl.scheme, in);
|
||||
read(acl.id, in);
|
||||
}
|
||||
|
||||
void read(Stat & stat, ReadBuffer & in)
|
||||
{
|
||||
read(stat.czxid, in);
|
||||
read(stat.mzxid, in);
|
||||
read(stat.ctime, in);
|
||||
read(stat.mtime, in);
|
||||
read(stat.version, in);
|
||||
read(stat.cversion, in);
|
||||
read(stat.aversion, in);
|
||||
read(stat.ephemeralOwner, in);
|
||||
read(stat.dataLength, in);
|
||||
read(stat.numChildren, in);
|
||||
read(stat.pzxid, in);
|
||||
}
|
||||
|
||||
void read(Error & x, ReadBuffer & in)
|
||||
{
|
||||
int32_t code;
|
||||
read(code, in);
|
||||
x = Coordination::Error(code);
|
||||
}
|
||||
|
||||
}
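A hedged round-trip sketch of the byte-order handling above: write() stores integers in network (big-endian) order by byte-swapping on a little-endian host, and read() swaps them back. The in-memory buffers are the usual ClickHouse helpers, assumed to be included.

int32_t value = 1;

DB::WriteBufferFromOwnString out;
Coordination::write(value, out);            /// bytes on the wire: 00 00 00 01

DB::ReadBufferFromString in(out.str());
int32_t parsed = 0;
Coordination::read(parsed, in);             /// parsed == 1 again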
|
74
src/Common/ZooKeeper/ZooKeeperIO.h
Normal file
@ -0,0 +1,74 @@
|
||||
#pragma once
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <Common/ZooKeeper/IKeeper.h>
|
||||
#include <Common/ZooKeeper/ZooKeeperConstants.h>
|
||||
#include <cstdint>
|
||||
#include <vector>
|
||||
#include <array>
|
||||
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
using namespace DB;
|
||||
|
||||
void write(int64_t x, WriteBuffer & out);
|
||||
void write(int32_t x, WriteBuffer & out);
|
||||
void write(OpNum x, WriteBuffer & out);
|
||||
void write(bool x, WriteBuffer & out);
|
||||
void write(const std::string & s, WriteBuffer & out);
|
||||
void write(const ACL & acl, WriteBuffer & out);
|
||||
void write(const Stat & stat, WriteBuffer & out);
|
||||
void write(const Error & x, WriteBuffer & out);
|
||||
|
||||
template <size_t N>
|
||||
void write(const std::array<char, N> s, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(N), out);
|
||||
out.write(s.data(), N);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void write(const std::vector<T> & arr, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(arr.size()), out);
|
||||
for (const auto & elem : arr)
|
||||
write(elem, out);
|
||||
}
|
||||
|
||||
void read(int64_t & x, ReadBuffer & in);
|
||||
void read(int32_t & x, ReadBuffer & in);
|
||||
void read(OpNum & x, ReadBuffer & in);
|
||||
void read(bool & x, ReadBuffer & in);
|
||||
void read(int8_t & x, ReadBuffer & in);
|
||||
void read(std::string & s, ReadBuffer & in);
|
||||
void read(ACL & acl, ReadBuffer & in);
|
||||
void read(Stat & stat, ReadBuffer & in);
|
||||
void read(Error & x, ReadBuffer & in);
|
||||
|
||||
template <size_t N>
|
||||
void read(std::array<char, N> & s, ReadBuffer & in)
|
||||
{
|
||||
int32_t size = 0;
|
||||
read(size, in);
|
||||
if (size != N)
|
||||
throw Exception("Unexpected array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
|
||||
in.read(s.data(), N);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void read(std::vector<T> & arr, ReadBuffer & in)
|
||||
{
|
||||
int32_t size = 0;
|
||||
read(size, in);
|
||||
if (size < 0)
|
||||
throw Exception("Negative size while reading array from ZooKeeper", Error::ZMARSHALLINGERROR);
|
||||
if (size > MAX_STRING_OR_ARRAY_SIZE)
|
||||
throw Exception("Too large array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
|
||||
arr.resize(size);
|
||||
for (auto & elem : arr)
|
||||
read(elem, in);
|
||||
}
|
||||
|
||||
}
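The templates above extend the same length-prefixed encoding to containers, for example a list of child node names. A hedged round-trip sketch, with the buffers assumed as before:

std::vector<std::string> names{"a", "b", "c"};

DB::WriteBufferFromOwnString out;
Coordination::write(names, out);            /// int32 count, then each length-prefixed string

DB::ReadBufferFromString in(out.str());
std::vector<std::string> parsed;
Coordination::read(parsed, in);             /// parsed == names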
|
@ -2,11 +2,12 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Common/setThreadName.h>
|
||||
|
||||
#include <Common/ZooKeeper/ZooKeeperIO.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/Operators.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <common/logger_useful.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config.h>
|
||||
@ -19,11 +20,6 @@
|
||||
#include <array>
|
||||
|
||||
|
||||
/// ZooKeeper has 1 MB node size and serialization limit by default,
|
||||
/// but it can be raised up, so we have a slightly larger limit on our side.
|
||||
#define MAX_STRING_OR_ARRAY_SIZE (1 << 28) /// 256 MiB
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
{
|
||||
extern const Event ZooKeeperInit;
|
||||
@ -266,137 +262,6 @@ namespace Coordination
|
||||
|
||||
using namespace DB;
|
||||
|
||||
|
||||
/// Assuming we are at little endian.
|
||||
|
||||
static void write(int64_t x, WriteBuffer & out)
|
||||
{
|
||||
x = __builtin_bswap64(x);
|
||||
writeBinary(x, out);
|
||||
}
|
||||
|
||||
static void write(int32_t x, WriteBuffer & out)
|
||||
{
|
||||
x = __builtin_bswap32(x);
|
||||
writeBinary(x, out);
|
||||
}
|
||||
|
||||
static void write(bool x, WriteBuffer & out)
|
||||
{
|
||||
writeBinary(x, out);
|
||||
}
|
||||
|
||||
static void write(const String & s, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(s.size()), out);
|
||||
out.write(s.data(), s.size());
|
||||
}
|
||||
|
||||
template <size_t N> void write(std::array<char, N> s, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(N), out);
|
||||
out.write(s.data(), N);
|
||||
}
|
||||
|
||||
template <typename T> void write(const std::vector<T> & arr, WriteBuffer & out)
|
||||
{
|
||||
write(int32_t(arr.size()), out);
|
||||
for (const auto & elem : arr)
|
||||
write(elem, out);
|
||||
}
|
||||
|
||||
static void write(const ACL & acl, WriteBuffer & out)
|
||||
{
|
||||
write(acl.permissions, out);
|
||||
write(acl.scheme, out);
|
||||
write(acl.id, out);
|
||||
}
|
||||
|
||||
|
||||
static void read(int64_t & x, ReadBuffer & in)
{
    readBinary(x, in);
    x = __builtin_bswap64(x);
}

static void read(int32_t & x, ReadBuffer & in)
{
    readBinary(x, in);
    x = __builtin_bswap32(x);
}

static void read(Error & x, ReadBuffer & in)
{
    int32_t code;
    read(code, in);
    x = Error(code);
}

static void read(bool & x, ReadBuffer & in)
{
    readBinary(x, in);
}

static void read(String & s, ReadBuffer & in)
{
    int32_t size = 0;
    read(size, in);

    if (size == -1)
    {
        /// It means that zookeeper node has NULL value. We will treat it like empty string.
        s.clear();
        return;
    }

    if (size < 0)
        throw Exception("Negative size while reading string from ZooKeeper", Error::ZMARSHALLINGERROR);

    if (size > MAX_STRING_OR_ARRAY_SIZE)
        throw Exception("Too large string size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);

    s.resize(size);
    in.read(s.data(), size);
}

template <size_t N> void read(std::array<char, N> & s, ReadBuffer & in)
{
    int32_t size = 0;
    read(size, in);
    if (size != N)
        throw Exception("Unexpected array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
    in.read(s.data(), N);
}

static void read(Stat & stat, ReadBuffer & in)
{
    read(stat.czxid, in);
    read(stat.mzxid, in);
    read(stat.ctime, in);
    read(stat.mtime, in);
    read(stat.version, in);
    read(stat.cversion, in);
    read(stat.aversion, in);
    read(stat.ephemeralOwner, in);
    read(stat.dataLength, in);
    read(stat.numChildren, in);
    read(stat.pzxid, in);
}

template <typename T> void read(std::vector<T> & arr, ReadBuffer & in)
{
    int32_t size = 0;
    read(size, in);
    if (size < 0)
        throw Exception("Negative size while reading array from ZooKeeper", Error::ZMARSHALLINGERROR);
    if (size > MAX_STRING_OR_ARRAY_SIZE)
        throw Exception("Too large array size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);
    arr.resize(size);
    for (auto & elem : arr)
        read(elem, in);
}


template <typename T>
void ZooKeeper::write(const T & x)
{
@ -409,19 +274,6 @@ void ZooKeeper::read(T & x)
    Coordination::read(x, *in);
}


void ZooKeeperRequest::write(WriteBuffer & out) const
{
    /// Excessive copy to calculate length.
    WriteBufferFromOwnString buf;
    Coordination::write(xid, buf);
    Coordination::write(getOpNum(), buf);
    writeImpl(buf);
    Coordination::write(buf.str(), out);
    out.next();
}


static void removeRootPath(String & path, const String & root_path)
{
    if (root_path.empty())
@ -433,394 +285,6 @@ static void removeRootPath(String & path, const String & root_path)
    path = path.substr(root_path.size());
}

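ZooKeeperRequest::write above serializes the body into a temporary buffer first so that the total length can be written as a prefix before the payload. A small standalone sketch of the same length-prefix framing over a plain std::string (the helper name is illustrative):

    #include <cstdint>
    #include <string>

    /// Frame a payload as <int32 big-endian length><payload>, mirroring the
    /// "excessive copy to calculate length" approach used above.
    static std::string frame(const std::string & payload)
    {
        int32_t size = __builtin_bswap32(static_cast<int32_t>(payload.size()));
        std::string out(reinterpret_cast<const char *>(&size), sizeof(size));
        out += payload;
        return out;
    }

    int main()
    {
        return frame("ruok").size() == 8 ? 0 : 1;   /// 4 length bytes + 4 payload bytes
    }
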
struct ZooKeeperResponse : virtual Response
{
    virtual ~ZooKeeperResponse() override = default;
    virtual void readImpl(ReadBuffer &) = 0;
};


struct ZooKeeperHeartbeatRequest final : ZooKeeperRequest
{
    String getPath() const override { return {}; }
    ZooKeeper::OpNum getOpNum() const override { return 11; }
    void writeImpl(WriteBuffer &) const override {}
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
};

struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(type, in);
        Coordination::read(state, in);
        Coordination::read(path, in);
    }
};

struct ZooKeeperAuthRequest final : ZooKeeperRequest
{
    int32_t type = 0; /// ignored by the server
    String scheme;
    String data;

    String getPath() const override { return {}; }
    ZooKeeper::OpNum getOpNum() const override { return 100; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(type, out);
        Coordination::write(scheme, out);
        Coordination::write(data, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperAuthResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
};

struct ZooKeeperCloseRequest final : ZooKeeperRequest
{
    String getPath() const override { return {}; }
    ZooKeeper::OpNum getOpNum() const override { return -11; }
    void writeImpl(WriteBuffer &) const override {}
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCloseResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override
    {
        throw Exception("Received response for close request", Error::ZRUNTIMEINCONSISTENCY);
    }
};

struct ZooKeeperCreateRequest final : CreateRequest, ZooKeeperRequest
{
    ZooKeeperCreateRequest() = default;
    explicit ZooKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}

    ZooKeeper::OpNum getOpNum() const override { return 1; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(data, out);
        Coordination::write(acls, out);

        int32_t flags = 0;

        if (is_ephemeral)
            flags |= 1;
        if (is_sequential)
            flags |= 2;

        Coordination::write(flags, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(path_created, in);
    }
};

struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
{
    ZooKeeperRemoveRequest() = default;
    explicit ZooKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}

    ZooKeeper::OpNum getOpNum() const override { return 2; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(version, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
};

struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
{
    ZooKeeper::OpNum getOpNum() const override { return 3; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(has_watch, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(stat, in);
    }
};

struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
{
    ZooKeeper::OpNum getOpNum() const override { return 4; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(has_watch, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(data, in);
        Coordination::read(stat, in);
    }
};

struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
{
    ZooKeeperSetRequest() = default;
    explicit ZooKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}

    ZooKeeper::OpNum getOpNum() const override { return 5; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(data, out);
        Coordination::write(version, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(stat, in);
    }
};

struct ZooKeeperListRequest final : ListRequest, ZooKeeperRequest
{
    ZooKeeper::OpNum getOpNum() const override { return 12; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(has_watch, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperListResponse final : ListResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::read(names, in);
        Coordination::read(stat, in);
    }
};

struct ZooKeeperCheckRequest final : CheckRequest, ZooKeeperRequest
{
    ZooKeeperCheckRequest() = default;
    explicit ZooKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {}

    ZooKeeper::OpNum getOpNum() const override { return 13; }
    void writeImpl(WriteBuffer & out) const override
    {
        Coordination::write(path, out);
        Coordination::write(version, out);
    }
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
};

/// This response may be received only as an element of responses in MultiResponse.
struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override
    {
        Coordination::Error read_error;
        Coordination::read(read_error, in);

        if (read_error != error)
            throw Exception(fmt::format("Error code in ErrorResponse ({}) doesn't match error code in header ({})", read_error, error),
                Error::ZMARSHALLINGERROR);
    }
};

struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
{
    ZooKeeper::OpNum getOpNum() const override { return 14; }

    ZooKeeperMultiRequest(const Requests & generic_requests, const ACLs & default_acls)
    {
        /// Convert nested Requests to ZooKeeperRequests.
        /// Note that deep copy is required to avoid modifying path in presence of chroot prefix.
        requests.reserve(generic_requests.size());

        for (const auto & generic_request : generic_requests)
        {
            if (const auto * concrete_request_create = dynamic_cast<const CreateRequest *>(generic_request.get()))
            {
                auto create = std::make_shared<ZooKeeperCreateRequest>(*concrete_request_create);
                if (create->acls.empty())
                    create->acls = default_acls;
                requests.push_back(create);
            }
            else if (const auto * concrete_request_remove = dynamic_cast<const RemoveRequest *>(generic_request.get()))
            {
                requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request_remove));
            }
            else if (const auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
            {
                requests.push_back(std::make_shared<ZooKeeperSetRequest>(*concrete_request_set));
            }
            else if (const auto * concrete_request_check = dynamic_cast<const CheckRequest *>(generic_request.get()))
            {
                requests.push_back(std::make_shared<ZooKeeperCheckRequest>(*concrete_request_check));
            }
            else
                throw Exception("Illegal command as part of multi ZooKeeper request", Error::ZBADARGUMENTS);
        }
    }

    void writeImpl(WriteBuffer & out) const override
    {
        for (const auto & request : requests)
        {
            const auto & zk_request = dynamic_cast<const ZooKeeperRequest &>(*request);

            bool done = false;
            int32_t error = -1;

            Coordination::write(zk_request.getOpNum(), out);
            Coordination::write(done, out);
            Coordination::write(error, out);

            zk_request.writeImpl(out);
        }

        ZooKeeper::OpNum op_num = -1;
        bool done = true;
        int32_t error = -1;

        Coordination::write(op_num, out);
        Coordination::write(done, out);
        Coordination::write(error, out);
    }

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
{
    explicit ZooKeeperMultiResponse(const Requests & requests)
    {
        responses.reserve(requests.size());

        for (const auto & request : requests)
            responses.emplace_back(dynamic_cast<const ZooKeeperRequest &>(*request).makeResponse());
    }

    void readImpl(ReadBuffer & in) override
    {
        for (auto & response : responses)
        {
            ZooKeeper::OpNum op_num;
            bool done;
            Error op_error;

            Coordination::read(op_num, in);
            Coordination::read(done, in);
            Coordination::read(op_error, in);

            if (done)
                throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);

            /// op_num == -1 is special for multi transaction.
            /// For unknown reason, error code is duplicated in header and in response body.

            if (op_num == -1)
                response = std::make_shared<ZooKeeperErrorResponse>();

            if (op_error != Error::ZOK)
            {
                response->error = op_error;

                /// Set error for whole transaction.
                /// If some operations fail, ZK send global error as zero and then send details about each operation.
                /// It will set error code for first failed operation and it will set special "runtime inconsistency" code for other operations.
                if (error == Error::ZOK && op_error != Error::ZRUNTIMEINCONSISTENCY)
                    error = op_error;
            }

            if (op_error == Error::ZOK || op_num == -1)
                dynamic_cast<ZooKeeperResponse &>(*response).readImpl(in);
        }

        /// Footer.
        {
            ZooKeeper::OpNum op_num;
            bool done;
            int32_t error_read;

            Coordination::read(op_num, in);
            Coordination::read(done, in);
            Coordination::read(error_read, in);

            if (!done)
                throw Exception("Too many results received for multi transaction", Error::ZMARSHALLINGERROR);
            if (op_num != -1)
                throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
            if (error_read != -1)
                throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
        }
    }
};

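As readImpl above shows, a multi response is a sequence of per-operation headers (op_num, done, error) terminated by a footer where done is true, op_num is -1 and error is -1. A tiny standalone sketch of that loop shape over an in-memory sequence (the struct and function names are illustrative, not from the diff):

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    struct OpHeader { int32_t op_num; bool done; int32_t error; };

    /// Count sub-results until the footer, following the framing read by
    /// ZooKeeperMultiResponse::readImpl: the footer has done=true, op_num=-1, error=-1.
    static size_t countResults(const std::vector<OpHeader> & headers)
    {
        size_t n = 0;
        for (const auto & h : headers)
        {
            if (h.done)
            {
                if (h.op_num != -1 || h.error != -1)
                    throw std::runtime_error("Malformed multi footer");
                return n;
            }
            ++n;
        }
        throw std::runtime_error("Missing multi footer");
    }

    int main()
    {
        std::vector<OpHeader> seq{{1, false, 0}, {2, false, 0}, {-1, true, -1}};
        return countResults(seq) == 2 ? 0 : 1;
    }
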
ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return std::make_shared<ZooKeeperHeartbeatResponse>(); }
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return std::make_shared<ZooKeeperAuthResponse>(); }
ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const { return std::make_shared<ZooKeeperCreateResponse>(); }
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return std::make_shared<ZooKeeperRemoveResponse>(); }
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return std::make_shared<ZooKeeperExistsResponse>(); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return std::make_shared<ZooKeeperGetResponse>(); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return std::make_shared<ZooKeeperSetResponse>(); }
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::make_shared<ZooKeeperListResponse>(); }
ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::make_shared<ZooKeeperCheckResponse>(); }
ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared<ZooKeeperMultiResponse>(requests); }
ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared<ZooKeeperCloseResponse>(); }


static constexpr int32_t protocol_version = 0;

static constexpr ZooKeeper::XID watch_xid = -1;
static constexpr ZooKeeper::XID ping_xid = -2;
static constexpr ZooKeeper::XID auth_xid = -4;

static constexpr ZooKeeper::XID close_xid = 0x7FFFFFFF;


ZooKeeper::~ZooKeeper()
{
    try
@ -995,7 +459,7 @@ void ZooKeeper::sendHandshake()
    std::array<char, passwd_len> passwd {};

    write(handshake_length);
    write(protocol_version);
    write(ZOOKEEPER_PROTOCOL_VERSION);
    write(last_zxid_seen);
    write(timeout);
    write(previous_session_id);
@ -1010,16 +474,15 @@ void ZooKeeper::receiveHandshake()
    int32_t handshake_length;
    int32_t protocol_version_read;
    int32_t timeout;
    constexpr int32_t passwd_len = 16;
    std::array<char, passwd_len> passwd;
    std::array<char, PASSWORD_LENGTH> passwd;

    read(handshake_length);
    if (handshake_length != 36)
        throw Exception("Unexpected handshake length received: " + toString(handshake_length), Error::ZMARSHALLINGERROR);
    if (handshake_length != SERVER_HANDSHAKE_LENGTH)
        throw Exception("Unexpected handshake length received: " + DB::toString(handshake_length), Error::ZMARSHALLINGERROR);

    read(protocol_version_read);
    if (protocol_version_read != protocol_version)
        throw Exception("Unexpected protocol version: " + toString(protocol_version_read), Error::ZMARSHALLINGERROR);
    if (protocol_version_read != ZOOKEEPER_PROTOCOL_VERSION)
        throw Exception("Unexpected protocol version: " + DB::toString(protocol_version_read), Error::ZMARSHALLINGERROR);

    read(timeout);
    if (timeout != session_timeout.totalMilliseconds())
@ -1036,7 +499,7 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
    ZooKeeperAuthRequest request;
    request.scheme = scheme;
    request.data = data;
    request.xid = auth_xid;
    request.xid = AUTH_XID;
    request.write(*out);

    int32_t length;
@ -1050,17 +513,17 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
    read(zxid);
    read(err);

    if (read_xid != auth_xid)
        throw Exception("Unexpected event received in reply to auth request: " + toString(read_xid),
    if (read_xid != AUTH_XID)
        throw Exception("Unexpected event received in reply to auth request: " + DB::toString(read_xid),
            Error::ZMARSHALLINGERROR);

    int32_t actual_length = in->count() - count_before_event;
    if (length != actual_length)
        throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length),
        throw Exception("Response length doesn't match. Expected: " + DB::toString(length) + ", actual: " + DB::toString(actual_length),
            Error::ZMARSHALLINGERROR);

    if (err != Error::ZOK)
        throw Exception("Error received in reply to auth request. Code: " + toString(int32_t(err)) + ". Message: " + String(errorMessage(err)),
        throw Exception("Error received in reply to auth request. Code: " + DB::toString(int32_t(err)) + ". Message: " + String(errorMessage(err)),
            Error::ZMARSHALLINGERROR);
}

@ -1093,7 +556,7 @@ void ZooKeeper::sendThread()
    /// After we popped element from the queue, we must register callbacks (even in the case when expired == true right now),
    /// because they must not be lost (callbacks must be called because the user will wait for them).

    if (info.request->xid != close_xid)
    if (info.request->xid != CLOSE_XID)
    {
        CurrentMetrics::add(CurrentMetrics::ZooKeeperRequest);
        std::lock_guard lock(operations_mutex);
@ -1107,7 +570,9 @@ void ZooKeeper::sendThread()
    }

    if (expired)
    {
        break;
    }

    info.request->addRootPath(root_path);

@ -1115,7 +580,7 @@ void ZooKeeper::sendThread()
    info.request->write(*out);

    /// We sent close request, exit
    if (info.request->xid == close_xid)
    if (info.request->xid == CLOSE_XID)
        break;
    }
}
@ -1125,7 +590,7 @@ void ZooKeeper::sendThread()
    prev_heartbeat_time = clock::now();

    ZooKeeperHeartbeatRequest request;
    request.xid = ping_xid;
    request.xid = PING_XID;
    request.write(*out);
}

@ -1179,7 +644,9 @@ void ZooKeeper::receiveThread()
    else
    {
        if (earliest_operation)
            throw Exception("Operation timeout (no response) for path: " + earliest_operation->request->getPath(), Error::ZOPERATIONTIMEOUT);
        {
            throw Exception("Operation timeout (no response) for request " + toString(earliest_operation->request->getOpNum()) + " for path: " + earliest_operation->request->getPath(), Error::ZOPERATIONTIMEOUT);
        }
        waited += max_wait;
        if (waited >= session_timeout.totalMicroseconds())
            throw Exception("Nothing is received in session timeout", Error::ZOPERATIONTIMEOUT);
@ -1213,14 +680,14 @@ void ZooKeeper::receiveEvent()
    RequestInfo request_info;
    ZooKeeperResponsePtr response;

    if (xid == ping_xid)
    if (xid == PING_XID)
    {
        if (err != Error::ZOK)
            throw Exception("Received error in heartbeat response: " + String(errorMessage(err)), Error::ZRUNTIMEINCONSISTENCY);

        response = std::make_shared<ZooKeeperHeartbeatResponse>();
    }
    else if (xid == watch_xid)
    else if (xid == WATCH_XID)
    {
        ProfileEvents::increment(ProfileEvents::ZooKeeperWatchResponse);
        response = std::make_shared<ZooKeeperWatchResponse>();
@ -1261,7 +728,7 @@ void ZooKeeper::receiveEvent()

    auto it = operations.find(xid);
    if (it == operations.end())
        throw Exception("Received response for unknown xid", Error::ZRUNTIMEINCONSISTENCY);
        throw Exception("Received response for unknown xid " + DB::toString(xid), Error::ZRUNTIMEINCONSISTENCY);

    /// After this point, we must invoke callback, that we've grabbed from 'operations'.
    /// Invariant: all callbacks are invoked either in case of success or in case of error.
@ -1282,13 +749,14 @@ void ZooKeeper::receiveEvent()
    response = request_info.request->makeResponse();

    if (err != Error::ZOK)
    {
        response->error = err;
    }
    else
    {
        response->readImpl(*in);
        response->removeRootPath(root_path);
    }

    /// Instead of setting the watch in sendEvent, set it in receiveEvent because need to check the response.
    /// The watch shouldn't be set if the node does not exist and it will never exist like sequential ephemeral nodes.
    /// By using getData() instead of exists(), a watch won't be set if the node doesn't exist.
@ -1298,7 +766,7 @@ void ZooKeeper::receiveEvent()
    /// 3 indicates the ZooKeeperExistsRequest.
    // For exists, we set the watch on both node exist and nonexist case.
    // For other case like getData, we only set the watch when node exists.
    if (request_info.request->getOpNum() == 3)
    if (request_info.request->getOpNum() == OpNum::Exists)
        add_watch = (response->error == Error::ZOK || response->error == Error::ZNONODE);
    else
        add_watch = response->error == Error::ZOK;
@ -1315,7 +783,7 @@ void ZooKeeper::receiveEvent()

    int32_t actual_length = in->count() - count_before_event;
    if (length != actual_length)
        throw Exception("Response length doesn't match. Expected: " + toString(length) + ", actual: " + toString(actual_length), Error::ZMARSHALLINGERROR);
        throw Exception("Response length doesn't match. Expected: " + DB::toString(length) + ", actual: " + DB::toString(actual_length), Error::ZMARSHALLINGERROR);
}
catch (...)
{
@ -1508,7 +976,7 @@ void ZooKeeper::pushRequest(RequestInfo && info)
    if (!info.request->xid)
    {
        info.request->xid = next_xid.fetch_add(1);
        if (info.request->xid == close_xid)
        if (info.request->xid == CLOSE_XID)
            throw Exception("xid equal to close_xid", Error::ZSESSIONEXPIRED);
        if (info.request->xid < 0)
            throw Exception("XID overflow", Error::ZSESSIONEXPIRED);
@ -1688,7 +1156,7 @@ void ZooKeeper::multi(
void ZooKeeper::close()
{
    ZooKeeperCloseRequest request;
    request.xid = close_xid;
    request.xid = CLOSE_XID;

    RequestInfo request_info;
    request_info.request = std::make_shared<ZooKeeperCloseRequest>(std::move(request));
@ -1699,5 +1167,4 @@ void ZooKeeper::close()
    ProfileEvents::increment(ProfileEvents::ZooKeeperClose);
}


}
@ -5,6 +5,7 @@
#include <Common/CurrentMetrics.h>
#include <Common/ThreadPool.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>

#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
@ -85,9 +86,6 @@ namespace Coordination

using namespace DB;

struct ZooKeeperRequest;


/** Usage scenario: look at the documentation for IKeeper class.
  */
class ZooKeeper : public IKeeper
@ -101,9 +99,6 @@ public:

    using Nodes = std::vector<Node>;

    using XID = int32_t;
    using OpNum = int32_t;

    /** Connection to nodes is performed in order. If you want, shuffle them manually.
      * Operation timeout couldn't be greater than session timeout.
      * Operation timeout applies independently for network read, network write, waiting for events and synchronization.
@ -196,7 +191,7 @@ private:

    struct RequestInfo
    {
        std::shared_ptr<ZooKeeperRequest> request;
        ZooKeeperRequestPtr request;
        ResponseCallback callback;
        WatchCallback watch;
        clock::time_point time;
@ -249,31 +244,4 @@ private:

    CurrentMetrics::Increment active_session_metric_increment{CurrentMetrics::ZooKeeperSession};
};

struct ZooKeeperResponse;
using ZooKeeperResponsePtr = std::shared_ptr<ZooKeeperResponse>;

/// Exposed in header file for Yandex.Metrica code.
struct ZooKeeperRequest : virtual Request
{
    ZooKeeper::XID xid = 0;
    bool has_watch = false;
    /// If the request was not send and the error happens, we definitely sure, that is has not been processed by the server.
    /// If the request was sent and we didn't get the response and the error happens, then we cannot be sure was it processed or not.
    bool probably_sent = false;

    ZooKeeperRequest() = default;
    ZooKeeperRequest(const ZooKeeperRequest &) = default;
    virtual ~ZooKeeperRequest() override = default;

    virtual ZooKeeper::OpNum getOpNum() const = 0;

    /// Writes length, xid, op_num, then the rest.
    void write(WriteBuffer & out) const;

    virtual void writeImpl(WriteBuffer &) const = 0;

    virtual ZooKeeperResponsePtr makeResponse() const = 0;
};


}
@ -25,7 +25,8 @@ int main(int argc, char ** argv)
        return 1;
    }

    const SymbolIndex & symbol_index = SymbolIndex::instance();
    auto symbol_index_ptr = SymbolIndex::instance();
    const SymbolIndex & symbol_index = *symbol_index_ptr;

    for (const auto & elem : symbol_index.symbols())
        std::cout << elem.name << ": " << elem.address_begin << " ... " << elem.address_end << "\n";
@ -80,7 +80,11 @@ SRCS(
    WeakHash.cpp
    ZooKeeper/IKeeper.cpp
    ZooKeeper/TestKeeper.cpp
    ZooKeeper/TestKeeperStorage.cpp
    ZooKeeper/ZooKeeper.cpp
    ZooKeeper/ZooKeeperCommon.cpp
    ZooKeeper/ZooKeeperConstants.cpp
    ZooKeeper/ZooKeeperIO.cpp
    ZooKeeper/ZooKeeperImpl.cpp
    ZooKeeper/ZooKeeperNodeCache.cpp
    checkStackSize.cpp
@ -114,15 +114,15 @@ private:
    static std::enable_if_t<IsDecimalNumber<T> && IsDecimalNumber<U>, Shift>
    getScales(const DataTypePtr & left_type, const DataTypePtr & right_type)
    {
        const DataTypeDecimal<T> * decimal0 = checkDecimal<T>(*left_type);
        const DataTypeDecimal<U> * decimal1 = checkDecimal<U>(*right_type);
        const DataTypeDecimalBase<T> * decimal0 = checkDecimalBase<T>(*left_type);
        const DataTypeDecimalBase<U> * decimal1 = checkDecimalBase<U>(*right_type);

        Shift shift;
        if (decimal0 && decimal1)
        {
            auto result_type = decimalResultType<false, false>(*decimal0, *decimal1);
            shift.a = static_cast<CompareInt>(result_type.scaleFactorFor(*decimal0, false).value);
            shift.b = static_cast<CompareInt>(result_type.scaleFactorFor(*decimal1, false).value);
            auto result_type = DecimalUtils::binaryOpResult<false, false>(*decimal0, *decimal1);
            shift.a = static_cast<CompareInt>(result_type.scaleFactorFor(decimal0->getTrait(), false).value);
            shift.b = static_cast<CompareInt>(result_type.scaleFactorFor(decimal1->getTrait(), false).value);
        }
        else if (decimal0)
            shift.b = static_cast<CompareInt>(decimal0->getScaleMultiplier().value);
@ -137,7 +137,7 @@ private:
    getScales(const DataTypePtr & left_type, const DataTypePtr &)
    {
        Shift shift;
        const DataTypeDecimal<T> * decimal0 = checkDecimal<T>(*left_type);
        const DataTypeDecimalBase<T> * decimal0 = checkDecimalBase<T>(*left_type);
        if (decimal0)
            shift.b = static_cast<CompareInt>(decimal0->getScaleMultiplier().value);
        return shift;
@ -148,7 +148,7 @@ private:
    getScales(const DataTypePtr &, const DataTypePtr & right_type)
    {
        Shift shift;
        const DataTypeDecimal<U> * decimal1 = checkDecimal<U>(*right_type);
        const DataTypeDecimalBase<U> * decimal1 = checkDecimalBase<U>(*right_type);
        if (decimal1)
            shift.a = static_cast<CompareInt>(decimal1->getScaleMultiplier().value);
        return shift;
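getScales above computes per-operand multipliers so that two decimals with different scales can be compared on a common scale. A small self-contained illustration of the idea with plain integers standing in for decimal columns (the values and names are made up for the example):

    #include <cstdint>

    int main()
    {
        /// Compare 1.20 (scale 2) with 1.2 (scale 1): the value with the smaller
        /// scale is multiplied by 10^(2-1) = 10 so both sit on scale 2.
        int64_t a = 120;        /// 1.20 at scale 2
        int64_t b = 12;         /// 1.2  at scale 1
        int64_t shift_b = 10;
        return (a == b * shift_b) ? 0 : 1;   /// equal after rescaling
    }
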
@ -11,9 +11,13 @@
namespace DB
{

template <typename T>
class DataTypeNumber;

namespace ErrorCodes
{
    extern const int DECIMAL_OVERFLOW;
    extern const int ARGUMENT_OUT_OF_BOUND;
}

namespace DecimalUtils
@ -23,6 +27,7 @@ static constexpr size_t minPrecision() { return 1; }
template <typename T> static constexpr size_t maxPrecision() { return 0; }
template <> constexpr size_t maxPrecision<Decimal32>() { return 9; }
template <> constexpr size_t maxPrecision<Decimal64>() { return 18; }
template <> constexpr size_t maxPrecision<DateTime64>() { return 18; }
template <> constexpr size_t maxPrecision<Decimal128>() { return 38; }
template <> constexpr size_t maxPrecision<Decimal256>() { return 76; }

@ -31,7 +36,7 @@ inline auto scaleMultiplier(UInt32 scale)
{
    if constexpr (std::is_same_v<T, Int32> || std::is_same_v<T, Decimal32>)
        return common::exp10_i32(scale);
    else if constexpr (std::is_same_v<T, Int64> || std::is_same_v<T, Decimal64>)
    else if constexpr (std::is_same_v<T, Int64> || std::is_same_v<T, Decimal64> || std::is_same_v<T, DateTime64>)
        return common::exp10_i64(scale);
    else if constexpr (std::is_same_v<T, Int128> || std::is_same_v<T, Decimal128>)
        return common::exp10_i128(scale);
@ -51,6 +56,30 @@ struct DecimalComponents
    T fractional;
};

/// Traits used for determining final Type/Precision/Scale for certain math operations on decimals.
template <typename T>
struct DataTypeDecimalTrait
{
    using FieldType = T;
    const UInt32 precision;
    const UInt32 scale;

    DataTypeDecimalTrait(UInt32 precision_, UInt32 scale_)
        : precision(precision_),
          scale(scale_)
    {}

    /// @returns multiplier for U to become T with correct scale
    template <typename U>
    T scaleFactorFor(const DataTypeDecimalTrait<U> & x, bool) const
    {
        if (scale < x.scale)
            throw Exception("Decimal result's scale is less than argument's one", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
        const UInt32 scale_delta = scale - x.scale; /// scale_delta >= 0
        return DecimalUtils::scaleMultiplier<typename T::NativeType>(scale_delta);
    }
};

/** Make a decimal value from whole and fractional components with given scale multiplier.
  * where scale_multiplier = scaleMultiplier<T>(scale)
  * this is to reduce number of calls to scaleMultiplier when scale is known.
@ -211,6 +240,35 @@ To convertTo(const DecimalType & decimal, size_t scale)
    }
}

template <bool is_multiply, bool is_division, typename T, typename U, template <typename> typename DecimalType>
inline auto binaryOpResult(const DecimalType<T> & tx, const DecimalType<U> & ty)
{
    UInt32 scale{};
    if constexpr (is_multiply)
        scale = tx.getScale() + ty.getScale();
    else if constexpr (is_division)
        scale = tx.getScale();
    else
        scale = (tx.getScale() > ty.getScale() ? tx.getScale() : ty.getScale());

    if constexpr (sizeof(T) < sizeof(U))
        return DataTypeDecimalTrait<U>(DecimalUtils::maxPrecision<U>(), scale);
    else
        return DataTypeDecimalTrait<T>(DecimalUtils::maxPrecision<T>(), scale);
}

template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DataTypeDecimalTrait<T> binaryOpResult(const DecimalType<T> & tx, const DataTypeNumber<U> &)
{
    return DataTypeDecimalTrait<T>(DecimalUtils::maxPrecision<T>(), tx.getScale());
}

template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DataTypeDecimalTrait<U> binaryOpResult(const DataTypeNumber<T> &, const DecimalType<U> & ty)
{
    return DataTypeDecimalTrait<U>(DecimalUtils::maxPrecision<U>(), ty.getScale());
}

}

}
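binaryOpResult above derives the result scale from the operands: the sum of the scales for multiplication, the left operand's scale for division, and the larger scale otherwise, while the precision is the maximum precision of the wider of the two types. Under these rules, for example, multiplying values with scales 2 and 4 yields scale 6, and adding them yields scale 4; a one-line standalone sanity check of that arithmetic:

    #include <algorithm>
    #include <cstdint>

    int main()
    {
        uint32_t scale_mul = 2 + 4;              /// multiplication: sum of scales
        uint32_t scale_add = std::max(2u, 4u);   /// addition: the larger scale
        return (scale_mul == 6 && scale_add == 4) ? 0 : 1;
    }
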
@ -73,8 +73,10 @@
/// Minimum revision supporting interserver secret.
#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441

#define DBMS_MIN_REVISION_WITH_X_FORWARDED_FOR_IN_CLIENT_INFO 54443

/// Version of ClickHouse TCP protocol. Increment it manually when you change the protocol.
#define DBMS_TCP_PROTOCOL_VERSION 54442
#define DBMS_TCP_PROTOCOL_VERSION 54443

/// The boundary on which the blocks for asynchronous file operations should be aligned.
#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096
@ -592,6 +592,10 @@ template <> bool decimalEqual(Decimal256 x, Decimal256 y, UInt32 x_scale, UInt32
template <> bool decimalLess(Decimal256 x, Decimal256 y, UInt32 x_scale, UInt32 y_scale) { return decLess(x, y, x_scale, y_scale); }
template <> bool decimalLessOrEqual(Decimal256 x, Decimal256 y, UInt32 x_scale, UInt32 y_scale) { return decLessOrEqual(x, y, x_scale, y_scale); }

template <> bool decimalEqual(DateTime64 x, DateTime64 y, UInt32 x_scale, UInt32 y_scale) { return decEqual(x, y, x_scale, y_scale); }
template <> bool decimalLess(DateTime64 x, DateTime64 y, UInt32 x_scale, UInt32 y_scale) { return decLess(x, y, x_scale, y_scale); }
template <> bool decimalLessOrEqual(DateTime64 x, DateTime64 y, UInt32 x_scale, UInt32 y_scale) { return decLessOrEqual(x, y, x_scale, y_scale); }

inline void writeText(const Null &, WriteBuffer & buf)
{
    writeText(std::string("Null"), buf);
@ -193,10 +193,12 @@ template <> struct NearestFieldTypeImpl<Decimal32> { using Type = DecimalField<D
template <> struct NearestFieldTypeImpl<Decimal64> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldTypeImpl<Decimal128> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldTypeImpl<Decimal256> { using Type = DecimalField<Decimal256>; };
template <> struct NearestFieldTypeImpl<DateTime64> { using Type = DecimalField<DateTime64>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal32>> { using Type = DecimalField<Decimal32>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal64>> { using Type = DecimalField<Decimal64>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal128>> { using Type = DecimalField<Decimal128>; };
template <> struct NearestFieldTypeImpl<DecimalField<Decimal256>> { using Type = DecimalField<Decimal256>; };
template <> struct NearestFieldTypeImpl<DecimalField<DateTime64>> { using Type = DecimalField<DateTime64>; };
template <> struct NearestFieldTypeImpl<Float32> { using Type = Float64; };
template <> struct NearestFieldTypeImpl<Float64> { using Type = Float64; };
template <> struct NearestFieldTypeImpl<const char *> { using Type = String; };
@ -731,6 +733,7 @@ template <> struct Field::TypeToEnum<DecimalField<Decimal32>>{ static const Type
template <> struct Field::TypeToEnum<DecimalField<Decimal64>>{ static const Types::Which value = Types::Decimal64; };
template <> struct Field::TypeToEnum<DecimalField<Decimal128>>{ static const Types::Which value = Types::Decimal128; };
template <> struct Field::TypeToEnum<DecimalField<Decimal256>>{ static const Types::Which value = Types::Decimal256; };
template <> struct Field::TypeToEnum<DecimalField<DateTime64>>{ static const Types::Which value = Types::Decimal64; };
template <> struct Field::TypeToEnum<AggregateFunctionStateData>{ static const Types::Which value = Types::AggregateFunctionState; };
template <> struct Field::TypeToEnum<UInt256> { static const Types::Which value = Types::UInt256; };
template <> struct Field::TypeToEnum<Int256> { static const Types::Which value = Types::Int256; };
@ -184,29 +184,44 @@ using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
using Decimal256 = Decimal<Int256>;

using DateTime64 = Decimal64;
// Distinguishable type to allow function resultion/deduction based on value type,
// but also relatively easy to convert to/from Decimal64.
class DateTime64 : public Decimal64
{
public:
    using Base = Decimal64;
    using Base::Base;

    DateTime64(const Base & v)
        : Base(v)
    {}
};

template <> struct TypeName<Decimal32> { static constexpr const char * get() { return "Decimal32"; } };
template <> struct TypeName<Decimal64> { static constexpr const char * get() { return "Decimal64"; } };
template <> struct TypeName<Decimal128> { static constexpr const char * get() { return "Decimal128"; } };
template <> struct TypeName<Decimal256> { static constexpr const char * get() { return "Decimal256"; } };
template <> struct TypeName<DateTime64> { static constexpr const char * get() { return "DateTime64"; } };

template <> struct TypeId<Decimal32> { static constexpr const TypeIndex value = TypeIndex::Decimal32; };
template <> struct TypeId<Decimal64> { static constexpr const TypeIndex value = TypeIndex::Decimal64; };
template <> struct TypeId<Decimal128> { static constexpr const TypeIndex value = TypeIndex::Decimal128; };
template <> struct TypeId<Decimal256> { static constexpr const TypeIndex value = TypeIndex::Decimal256; };
template <> struct TypeId<DateTime64> { static constexpr const TypeIndex value = TypeIndex::DateTime64; };

template <typename T> constexpr bool IsDecimalNumber = false;
template <> inline constexpr bool IsDecimalNumber<Decimal32> = true;
template <> inline constexpr bool IsDecimalNumber<Decimal64> = true;
template <> inline constexpr bool IsDecimalNumber<Decimal128> = true;
template <> inline constexpr bool IsDecimalNumber<Decimal256> = true;
template <> inline constexpr bool IsDecimalNumber<DateTime64> = true;

template <typename T> struct NativeType { using Type = T; };
template <> struct NativeType<Decimal32> { using Type = Int32; };
template <> struct NativeType<Decimal64> { using Type = Int64; };
template <> struct NativeType<Decimal128> { using Type = Int128; };
template <> struct NativeType<Decimal256> { using Type = Int256; };
template <> struct NativeType<DateTime64> { using Type = Int64; };

template <typename T> constexpr bool OverBigInt = false;
template <> inline constexpr bool OverBigInt<Int256> = true;
@ -275,6 +290,15 @@ namespace std
    }
};

template <>
struct hash<DB::DateTime64>
{
    size_t operator()(const DB::DateTime64 & x) const
    {
        return std::hash<std::decay_t<decltype(x)>::NativeType>()(x);
    }
};


template <>
struct hash<DB::Decimal256>
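Turning DateTime64 from a plain alias into a class derived from Decimal64, as in the hunk above, lets overload resolution distinguish the two while keeping cheap conversion from the base type. A minimal standalone sketch of that pattern (the struct names here are illustrative stand-ins, not the ClickHouse definitions):

    struct Dec64 { long long value = 0; };

    /// Distinguishable wrapper: overloads can select it, yet it still converts from the base.
    struct DT64 : Dec64
    {
        DT64() = default;
        DT64(const Dec64 & v) : Dec64(v) {}
    };

    static int which(const Dec64 &) { return 0; }
    static int which(const DT64 &) { return 1; }

    int main()
    {
        Dec64 d;
        DT64 t(d);   /// easy to convert from the base type
        return (which(d) == 0 && which(t) == 1) ? 0 : 1;
    }
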
@ -141,7 +141,7 @@ inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F && f)
{
    case TypeIndex::Date: return callOnBasicType<UInt16, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
    case TypeIndex::DateTime: return callOnBasicType<UInt32, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
    case TypeIndex::DateTime64: return callOnBasicType<Decimal64, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
    case TypeIndex::DateTime64: return callOnBasicType<DateTime64, _int, _float, _decimal, _datetime>(type_num2, std::forward<F>(f));
    default:
        break;
}
@ -16,6 +16,7 @@ namespace DB
class DataTypeDateTime64 final : public DataTypeDecimalBase<DateTime64>, public TimezoneMixin
{
public:
    using Base = DataTypeDecimalBase<DateTime64>;
    static constexpr UInt8 default_scale = 3;

    static constexpr auto family_name = "DateTime64";
@ -102,5 +102,6 @@ template class DataTypeDecimalBase<Decimal32>;
template class DataTypeDecimalBase<Decimal64>;
template class DataTypeDecimalBase<Decimal128>;
template class DataTypeDecimalBase<Decimal256>;
template class DataTypeDecimalBase<DateTime64>;

}
@ -150,39 +150,45 @@ public:

    static T getScaleMultiplier(UInt32 scale);

    inline DecimalUtils::DataTypeDecimalTrait<T> getTrait() const
    {
        return {precision, scale};
    }

protected:
    const UInt32 precision;
    const UInt32 scale;
};


template <typename T>
inline const DataTypeDecimalBase<T> * checkDecimalBase(const IDataType & data_type)
{
    if (isColumnedAsDecimalT<T>(data_type))
        return static_cast<const DataTypeDecimalBase<T> *>(&data_type);

    return nullptr;
}

template <bool is_multiply, bool is_division, typename T, typename U, template <typename> typename DecimalType>
inline auto decimalResultType(const DecimalType<T> & tx, const DecimalType<U> & ty)
{
    UInt32 scale{};
    if constexpr (is_multiply)
        scale = tx.getScale() + ty.getScale();
    else if constexpr (is_division)
        scale = tx.getScale();
    else
        scale = (tx.getScale() > ty.getScale() ? tx.getScale() : ty.getScale());

    if constexpr (sizeof(T) < sizeof(U))
        return DecimalType<U>(DecimalUtils::maxPrecision<U>(), scale);
    else
        return DecimalType<T>(DecimalUtils::maxPrecision<T>(), scale);
    const auto result_trait = DecimalUtils::binaryOpResult<is_multiply, is_division>(tx, ty);
    return DecimalType<typename decltype(result_trait)::FieldType>(result_trait.precision, result_trait.scale);
}

template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DecimalType<T> decimalResultType(const DecimalType<T> & tx, const DataTypeNumber<U> &)
template <bool is_multiply, bool is_division, typename T, typename U, template <typename> typename DecimalType>
inline const DecimalType<T> decimalResultType(const DecimalType<T> & tx, const DataTypeNumber<U> & ty)
{
    return DecimalType<T>(DecimalUtils::maxPrecision<T>(), tx.getScale());
    const auto result_trait = DecimalUtils::binaryOpResult<is_multiply, is_division>(tx, ty);
    return DecimalType<typename decltype(result_trait)::FieldType>(result_trait.precision, result_trait.scale);
}

template <bool, bool, typename T, typename U, template <typename> typename DecimalType>
inline const DecimalType<U> decimalResultType(const DataTypeNumber<T> &, const DecimalType<U> & ty)
template <bool is_multiply, bool is_division, typename T, typename U, template <typename> typename DecimalType>
inline const DecimalType<U> decimalResultType(const DataTypeNumber<T> & tx, const DecimalType<U> & ty)
{
    return DecimalType<U>(DecimalUtils::maxPrecision<U>(), ty.getScale());
    const auto result_trait = DecimalUtils::binaryOpResult<is_multiply, is_division>(tx, ty);
    return DecimalType<typename decltype(result_trait)::FieldType>(result_trait.precision, result_trait.scale);
}

template <template <typename> typename DecimalType>
@ -606,6 +606,14 @@ inline bool isColumnedAsDecimal(const T & data_type)
    return which.isDecimal() || which.isDateTime64();
}

// Same as isColumnedAsDecimal but also checks value type of underlyig column.
template <typename T, typename DataType>
inline bool isColumnedAsDecimalT(const DataType & data_type)
{
    const WhichDataType which(data_type);
    return (which.isDecimal() || which.isDateTime64()) && which.idx == TypeId<T>::value;
}

template <typename T>
inline bool isString(const T & data_type)
{
@ -36,7 +36,7 @@ static std::unordered_map<String, String> fetchTablesCreateQuery(

    MySQLBlockInputStream show_create_table(
        connection, "SHOW CREATE TABLE " + backQuoteIfNeed(database_name) + "." + backQuoteIfNeed(fetch_table_name),
        show_create_table_header, DEFAULT_BLOCK_SIZE);
        show_create_table_header, DEFAULT_BLOCK_SIZE, false, true);

    Block create_query_block = show_create_table.read();
    if (!create_query_block || create_query_block.rows() != 1)
@ -77,7 +77,7 @@ void MaterializeMetadata::fetchMasterStatus(mysqlxx::PoolWithFailover::Entry & c
        {std::make_shared<DataTypeString>(), "Executed_Gtid_Set"},
    };

    MySQLBlockInputStream input(connection, "SHOW MASTER STATUS;", header, DEFAULT_BLOCK_SIZE);
    MySQLBlockInputStream input(connection, "SHOW MASTER STATUS;", header, DEFAULT_BLOCK_SIZE, false, true);
    Block master_status = input.read();

    if (!master_status || master_status.rows() != 1)
@ -99,7 +99,7 @@ void MaterializeMetadata::fetchMasterVariablesValue(const mysqlxx::PoolWithFailo
    };

    const String & fetch_query = "SHOW VARIABLES WHERE Variable_name = 'binlog_checksum'";
    MySQLBlockInputStream variables_input(connection, fetch_query, variables_header, DEFAULT_BLOCK_SIZE);
    MySQLBlockInputStream variables_input(connection, fetch_query, variables_header, DEFAULT_BLOCK_SIZE, false, true);

    while (Block variables_block = variables_input.read())
    {
@ -114,23 +114,6 @@ void MaterializeMetadata::fetchMasterVariablesValue(const mysqlxx::PoolWithFailo
    }
}

static Block getShowMasterLogHeader(const String & mysql_version)
{
    if (startsWith(mysql_version, "5."))
    {
        return Block {
            {std::make_shared<DataTypeString>(), "Log_name"},
            {std::make_shared<DataTypeUInt64>(), "File_size"}
        };
    }

    return Block {
        {std::make_shared<DataTypeString>(), "Log_name"},
        {std::make_shared<DataTypeUInt64>(), "File_size"},
        {std::make_shared<DataTypeString>(), "Encrypted"}
    };
}

static bool checkSyncUserPrivImpl(const mysqlxx::PoolWithFailover::Entry & connection, WriteBuffer & out)
{
    Block sync_user_privs_header
@ -174,9 +157,14 @@ static void checkSyncUserPriv(const mysqlxx::PoolWithFailover::Entry & connectio
        "But the SYNC USER grant query is: " + out.str(), ErrorCodes::SYNC_MYSQL_USER_ACCESS_ERROR);
}

bool MaterializeMetadata::checkBinlogFileExists(const mysqlxx::PoolWithFailover::Entry & connection, const String & mysql_version) const
bool MaterializeMetadata::checkBinlogFileExists(const mysqlxx::PoolWithFailover::Entry & connection) const
{
    MySQLBlockInputStream input(connection, "SHOW MASTER LOGS", getShowMasterLogHeader(mysql_version), DEFAULT_BLOCK_SIZE);
    Block logs_header {
        {std::make_shared<DataTypeString>(), "Log_name"},
        {std::make_shared<DataTypeUInt64>(), "File_size"}
    };

    MySQLBlockInputStream input(connection, "SHOW MASTER LOGS", logs_header, DEFAULT_BLOCK_SIZE, false, true);

    while (Block block = input.read())
    {
@ -233,7 +221,7 @@ void MaterializeMetadata::transaction(const MySQLReplication::Position & positio

MaterializeMetadata::MaterializeMetadata(
    mysqlxx::PoolWithFailover::Entry & connection, const String & path_,
    const String & database, bool & opened_transaction, const String & mysql_version)
    const String & database, bool & opened_transaction)
    : persistent_path(path_)
{
    checkSyncUserPriv(connection);
@ -251,7 +239,7 @@ MaterializeMetadata::MaterializeMetadata(
    assertString("\nData Version:\t", in);
    readIntText(data_version, in);

    if (checkBinlogFileExists(connection, mysql_version))
    if (checkBinlogFileExists(connection))
        return;
}

@ -41,13 +41,13 @@ struct MaterializeMetadata

    void fetchMasterVariablesValue(const mysqlxx::PoolWithFailover::Entry & connection);

    bool checkBinlogFileExists(const mysqlxx::PoolWithFailover::Entry & connection, const String & mysql_version) const;
    bool checkBinlogFileExists(const mysqlxx::PoolWithFailover::Entry & connection) const;

    void transaction(const MySQLReplication::Position & position, const std::function<void()> & fun);

    MaterializeMetadata(
        mysqlxx::PoolWithFailover::Entry & connection, const String & path
        , const String & database, bool & opened_transaction, const String & mysql_version);
        , const String & database, bool & opened_transaction);
};

}