2020-10-11 02:19:01 +00:00
|
|
|
#include "GRPCServer.h"
|
2021-09-21 12:29:04 +00:00
|
|
|
#include <limits>
|
|
|
|
#include <memory>
|
2020-10-11 02:19:01 +00:00
|
|
|
#if USE_GRPC
|
|
|
|
|
2020-10-24 00:37:57 +00:00
|
|
|
#include <Columns/ColumnString.h>
|
|
|
|
#include <Columns/ColumnsNumber.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <Common/CurrentThread.h>
|
|
|
|
#include <Common/SettingsChanges.h>
|
2021-07-31 13:58:54 +00:00
|
|
|
#include <Common/setThreadName.h>
|
2021-10-07 08:26:08 +00:00
|
|
|
#include <Common/Stopwatch.h>
|
2020-11-03 11:47:34 +00:00
|
|
|
#include <DataTypes/DataTypeFactory.h>
|
2021-10-15 20:18:20 +00:00
|
|
|
#include <QueryPipeline/ProfileInfo.h>
|
2020-10-11 02:19:01 +00:00
|
|
|
#include <Interpreters/Context.h>
|
2020-10-24 00:37:57 +00:00
|
|
|
#include <Interpreters/InternalTextLogsQueue.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <Interpreters/executeQuery.h>
|
2021-08-01 14:12:34 +00:00
|
|
|
#include <Interpreters/Session.h>
|
2021-12-15 21:06:17 +00:00
|
|
|
#include <IO/CompressionMethod.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <IO/ConcatReadBuffer.h>
|
|
|
|
#include <IO/ReadBufferFromString.h>
|
|
|
|
#include <IO/ReadHelpers.h>
|
|
|
|
#include <Parsers/parseQuery.h>
|
2021-11-26 15:49:40 +00:00
|
|
|
#include <Parsers/ASTIdentifier_fwd.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <Parsers/ASTInsertQuery.h>
|
2020-10-23 20:55:47 +00:00
|
|
|
#include <Parsers/ASTQueryWithOutput.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <Parsers/ParserQuery.h>
|
2020-10-11 02:19:01 +00:00
|
|
|
#include <Processors/Executors/PullingAsyncPipelineExecutor.h>
|
2021-07-20 18:18:43 +00:00
|
|
|
#include <Processors/Executors/PullingPipelineExecutor.h>
|
2021-09-03 17:29:36 +00:00
|
|
|
#include <Processors/Executors/PushingPipelineExecutor.h>
|
2021-09-26 14:54:59 +00:00
|
|
|
#include <Processors/Executors/CompletedPipelineExecutor.h>
|
2021-11-11 11:41:15 +00:00
|
|
|
#include <Processors/Executors/PipelineExecutor.h>
|
2021-07-20 18:18:43 +00:00
|
|
|
#include <Processors/Formats/IInputFormat.h>
|
2021-10-11 16:11:50 +00:00
|
|
|
#include <Processors/Formats/IOutputFormat.h>
|
2021-09-03 17:29:36 +00:00
|
|
|
#include <Processors/Sinks/SinkToStorage.h>
|
2021-09-26 14:54:59 +00:00
|
|
|
#include <Processors/Sinks/EmptySink.h>
|
2021-10-16 14:03:50 +00:00
|
|
|
#include <QueryPipeline/QueryPipelineBuilder.h>
|
2020-10-11 02:19:01 +00:00
|
|
|
#include <Server/IServer.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
#include <Storages/IStorage.h>
|
2020-10-27 14:38:55 +00:00
|
|
|
#include <Poco/FileStream.h>
|
|
|
|
#include <Poco/StreamCopier.h>
|
2020-10-24 16:57:27 +00:00
|
|
|
#include <Poco/Util/LayeredConfiguration.h>
|
2021-10-02 07:13:14 +00:00
|
|
|
#include <base/range.h>
|
2022-04-27 15:05:45 +00:00
|
|
|
#include <Common/logger_useful.h>
|
2020-10-11 02:19:01 +00:00
|
|
|
#include <grpc++/security/server_credentials.h>
|
|
|
|
#include <grpc++/server.h>
|
|
|
|
#include <grpc++/server_builder.h>
|
2020-09-21 22:12:55 +00:00
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
|
2020-10-05 20:33:34 +00:00
|
|
|
using GRPCService = clickhouse::grpc::ClickHouse::AsyncService;
|
|
|
|
using GRPCQueryInfo = clickhouse::grpc::QueryInfo;
|
|
|
|
using GRPCResult = clickhouse::grpc::Result;
|
|
|
|
using GRPCException = clickhouse::grpc::Exception;
|
|
|
|
using GRPCProgress = clickhouse::grpc::Progress;
|
2022-02-06 18:33:31 +00:00
|
|
|
using GRPCObsoleteTransportCompression = clickhouse::grpc::ObsoleteTransportCompression;
|
2020-09-21 22:12:55 +00:00
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
namespace ErrorCodes
|
|
|
|
{
|
2020-10-27 14:38:55 +00:00
|
|
|
extern const int INVALID_CONFIG_PARAMETER;
|
2020-10-23 21:48:34 +00:00
|
|
|
extern const int INVALID_GRPC_QUERY_INFO;
|
2020-10-24 16:57:27 +00:00
|
|
|
extern const int INVALID_SESSION_TIMEOUT;
|
2020-10-24 22:03:49 +00:00
|
|
|
extern const int LOGICAL_ERROR;
|
2020-10-15 00:45:13 +00:00
|
|
|
extern const int NETWORK_ERROR;
|
2020-09-21 22:12:55 +00:00
|
|
|
extern const int NO_DATA_TO_INSERT;
|
2020-10-27 14:38:55 +00:00
|
|
|
extern const int SUPPORT_IS_DISABLED;
|
2022-01-27 16:43:19 +00:00
|
|
|
extern const int BAD_REQUEST_PARAMETER;
|
2020-09-21 22:12:55 +00:00
|
|
|
}
|
|
|
|
|
2020-10-11 02:19:01 +00:00
|
|
|
namespace
|
2020-09-21 22:12:55 +00:00
|
|
|
{
|
2020-12-07 11:27:01 +00:00
|
|
|
/// Make grpc to pass logging messages to ClickHouse logging system.
///
/// Installs a gpr log function that forwards gRPC core messages to a Poco logger
/// named "grpc", then raises gRPC's own verbosity to match either the explicit
/// `grpc.verbose_logs` config flag or the current level of that logger.
/// Idempotent: the installation runs exactly once per process (std::call_once).
void initGRPCLogging(const Poco::Util::AbstractConfiguration & config)
{
    static std::once_flag once_flag;
    std::call_once(once_flag, [&config]
    {
        /// Must be static: the plain-function callback below (no captures allowed
        /// by gpr_set_log_function) refers to it for the whole process lifetime.
        static Poco::Logger * logger = &Poco::Logger::get("grpc");
        gpr_set_log_function([](gpr_log_func_args* args)
        {
            /// Map gRPC severities onto the corresponding ClickHouse log levels.
            if (args->severity == GPR_LOG_SEVERITY_DEBUG)
                LOG_DEBUG(logger, "{} ({}:{})", args->message, args->file, args->line);
            else if (args->severity == GPR_LOG_SEVERITY_INFO)
                LOG_INFO(logger, "{} ({}:{})", args->message, args->file, args->line);
            else if (args->severity == GPR_LOG_SEVERITY_ERROR)
                LOG_ERROR(logger, "{} ({}:{})", args->message, args->file, args->line);
        });

        if (config.getBool("grpc.verbose_logs", false))
        {
            /// Most verbose mode: also turns on all of gRPC's internal tracers.
            gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
            grpc_tracer_set_enabled("all", true);
        }
        else if (logger->is(Poco::Message::PRIO_DEBUG))
        {
            gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
        }
        else if (logger->is(Poco::Message::PRIO_INFORMATION))
        {
            gpr_set_log_verbosity(GPR_LOG_SEVERITY_INFO);
        }
    });
}
|
|
|
|
|
2020-10-27 14:38:55 +00:00
|
|
|
/// Gets file's contents as a string, throws an exception if failed.
|
|
|
|
String readFile(const String & filepath)
|
|
|
|
{
|
|
|
|
Poco::FileInputStream ifs(filepath);
|
|
|
|
String res;
|
|
|
|
Poco::StreamCopier::copyToString(ifs, res);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Makes credentials based on the server config.
///
/// If `grpc.enable_ssl` is set, builds TLS server credentials from the
/// `grpc.ssl_key_file` / `grpc.ssl_cert_file` PEM files (and optionally requires
/// and verifies client certificates); otherwise returns insecure credentials.
/// Throws SUPPORT_IS_DISABLED if SSL is requested but the binary lacks SSL support.
std::shared_ptr<grpc::ServerCredentials> makeCredentials(const Poco::Util::AbstractConfiguration & config)
{
    if (config.getBool("grpc.enable_ssl", false))
    {
#if USE_SSL
        grpc::SslServerCredentialsOptions options;
        grpc::SslServerCredentialsOptions::PemKeyCertPair key_cert_pair;
        key_cert_pair.private_key = readFile(config.getString("grpc.ssl_key_file"));
        key_cert_pair.cert_chain = readFile(config.getString("grpc.ssl_cert_file"));
        options.pem_key_cert_pairs.emplace_back(std::move(key_cert_pair));
        if (config.getBool("grpc.ssl_require_client_auth", false))
        {
            /// Mutual TLS: the client must present a certificate and it must verify
            /// against the configured root CA (if one is given).
            options.client_certificate_request = GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY;
            if (config.has("grpc.ssl_ca_cert_file"))
                options.pem_root_certs = readFile(config.getString("grpc.ssl_ca_cert_file"));
        }
        return grpc::SslServerCredentials(options);
#else
        throw DB::Exception(DB::ErrorCodes::SUPPORT_IS_DISABLED, "Can't use SSL in grpc, because ClickHouse was built without SSL library");
#endif
    }
    return grpc::InsecureServerCredentials();
}
|
|
|
|
|
2022-02-06 18:33:31 +00:00
|
|
|
/// Transport compression makes gRPC library to compress packed Result messages before sending them through network.
struct TransportCompression
{
    grpc_compression_algorithm algorithm;  /// Which codec gRPC applies to outgoing messages.
    grpc_compression_level level;          /// How aggressively gRPC compresses.

    /// Extracts the settings of transport compression from a query info if possible.
    /// Returns std::nullopt when the query info specifies neither the new
    /// `transport_compression_*` fields nor the obsolete `result_compression` message.
    static std::optional<TransportCompression> fromQueryInfo(const GRPCQueryInfo & query_info)
    {
        TransportCompression res;
        /// The new-style string fields take precedence over the obsolete message.
        if (!query_info.transport_compression_type().empty())
        {
            res.setAlgorithm(query_info.transport_compression_type(), ErrorCodes::INVALID_GRPC_QUERY_INFO);
            res.setLevel(query_info.transport_compression_level(), ErrorCodes::INVALID_GRPC_QUERY_INFO);
            return res;
        }

        /// Backward compatibility: translate the obsolete enum-based message.
        if (query_info.has_obsolete_result_compression())
        {
            switch (query_info.obsolete_result_compression().algorithm())
            {
                case GRPCObsoleteTransportCompression::NO_COMPRESSION: res.algorithm = GRPC_COMPRESS_NONE; break;
                case GRPCObsoleteTransportCompression::DEFLATE: res.algorithm = GRPC_COMPRESS_DEFLATE; break;
                case GRPCObsoleteTransportCompression::GZIP: res.algorithm = GRPC_COMPRESS_GZIP; break;
                case GRPCObsoleteTransportCompression::STREAM_GZIP: res.algorithm = GRPC_COMPRESS_STREAM_GZIP; break;
                default: throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "Unknown compression algorithm: {}", GRPCObsoleteTransportCompression::CompressionAlgorithm_Name(query_info.obsolete_result_compression().algorithm()));
            }

            switch (query_info.obsolete_result_compression().level())
            {
                case GRPCObsoleteTransportCompression::COMPRESSION_NONE: res.level = GRPC_COMPRESS_LEVEL_NONE; break;
                case GRPCObsoleteTransportCompression::COMPRESSION_LOW: res.level = GRPC_COMPRESS_LEVEL_LOW; break;
                case GRPCObsoleteTransportCompression::COMPRESSION_MEDIUM: res.level = GRPC_COMPRESS_LEVEL_MED; break;
                case GRPCObsoleteTransportCompression::COMPRESSION_HIGH: res.level = GRPC_COMPRESS_LEVEL_HIGH; break;
                default: throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "Unknown compression level: {}", GRPCObsoleteTransportCompression::CompressionLevel_Name(query_info.obsolete_result_compression().level()));
            }
            return res;
        }

        return std::nullopt;
    }

    /// Extracts the settings of transport compression from the server configuration.
    /// Prefers the `grpc.transport_compression_*` keys; otherwise falls back to the
    /// older `grpc.compression` / `grpc.compression_level` keys (note: the old level
    /// key is a string, the new one is an integer).
    static TransportCompression fromConfiguration(const Poco::Util::AbstractConfiguration & config)
    {
        TransportCompression res;
        if (config.has("grpc.transport_compression_type"))
        {
            res.setAlgorithm(config.getString("grpc.transport_compression_type"), ErrorCodes::INVALID_CONFIG_PARAMETER);
            res.setLevel(config.getInt("grpc.transport_compression_level", 0), ErrorCodes::INVALID_CONFIG_PARAMETER);
        }
        else
        {
            res.setAlgorithm(config.getString("grpc.compression", "none"), ErrorCodes::INVALID_CONFIG_PARAMETER);
            res.setLevel(config.getString("grpc.compression_level", "none"), ErrorCodes::INVALID_CONFIG_PARAMETER);
        }
        return res;
    }

private:
    /// Parses an algorithm name ("none"/"deflate"/"gzip"/"stream_gzip");
    /// throws with the caller-chosen error code on anything else.
    void setAlgorithm(const String & str, int error_code)
    {
        if (str == "none")
            algorithm = GRPC_COMPRESS_NONE;
        else if (str == "deflate")
            algorithm = GRPC_COMPRESS_DEFLATE;
        else if (str == "gzip")
            algorithm = GRPC_COMPRESS_GZIP;
        else if (str == "stream_gzip")
            algorithm = GRPC_COMPRESS_STREAM_GZIP;
        else
            throw Exception(error_code, "Unknown compression algorithm: '{}'", str);
    }

    /// Parses a level name ("none"/"low"/"medium"/"high");
    /// throws with the caller-chosen error code on anything else.
    void setLevel(const String & str, int error_code)
    {
        if (str == "none")
            level = GRPC_COMPRESS_LEVEL_NONE;
        else if (str == "low")
            level = GRPC_COMPRESS_LEVEL_LOW;
        else if (str == "medium")
            level = GRPC_COMPRESS_LEVEL_MED;
        else if (str == "high")
            level = GRPC_COMPRESS_LEVEL_HIGH;
        else
            throw Exception(error_code, "Unknown compression level: '{}'", str);
    }

    /// Accepts a numeric level in [0, GRPC_COMPRESS_LEVEL_COUNT); throws otherwise.
    void setLevel(int level_, int error_code)
    {
        if (0 <= level_ && level_ < GRPC_COMPRESS_LEVEL_COUNT)
            level = static_cast<grpc_compression_level>(level_);
        else
            throw Exception(error_code, "Compression level {} is out of range 0..{}", level_, GRPC_COMPRESS_LEVEL_COUNT - 1);
    }
};
|
2020-10-27 14:38:55 +00:00
|
|
|
|
2020-10-24 16:57:27 +00:00
|
|
|
/// Gets session's timeout from query info or from the server config.
|
|
|
|
std::chrono::steady_clock::duration getSessionTimeout(const GRPCQueryInfo & query_info, const Poco::Util::AbstractConfiguration & config)
|
|
|
|
{
|
|
|
|
auto session_timeout = query_info.session_timeout();
|
|
|
|
if (session_timeout)
|
|
|
|
{
|
|
|
|
auto max_session_timeout = config.getUInt("max_session_timeout", 3600);
|
|
|
|
if (session_timeout > max_session_timeout)
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::INVALID_SESSION_TIMEOUT, "Session timeout '{}' is larger than max_session_timeout: {}. "
|
|
|
|
"Maximum session timeout could be modified in configuration file.",
|
|
|
|
std::to_string(session_timeout), std::to_string(max_session_timeout));
|
2020-10-24 16:57:27 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
session_timeout = config.getInt("default_session_timeout", 60);
|
|
|
|
return std::chrono::seconds(session_timeout);
|
|
|
|
}
|
|
|
|
|
2020-11-01 21:23:27 +00:00
|
|
|
/// Generates a description of a query by a specified query info.
|
|
|
|
/// This description is used for logging only.
|
|
|
|
String getQueryDescription(const GRPCQueryInfo & query_info)
|
|
|
|
{
|
|
|
|
String str;
|
|
|
|
if (!query_info.query().empty())
|
|
|
|
{
|
|
|
|
std::string_view query = query_info.query();
|
|
|
|
constexpr size_t max_query_length_to_log = 64;
|
|
|
|
if (query.length() > max_query_length_to_log)
|
|
|
|
query.remove_suffix(query.length() - max_query_length_to_log);
|
|
|
|
if (size_t format_pos = query.find(" FORMAT "); format_pos != String::npos)
|
|
|
|
query.remove_suffix(query.length() - format_pos - strlen(" FORMAT "));
|
|
|
|
str.append("\"").append(query);
|
|
|
|
if (query != query_info.query())
|
|
|
|
str.append("...");
|
|
|
|
str.append("\"");
|
|
|
|
}
|
|
|
|
if (!query_info.query_id().empty())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("query_id: ").append(query_info.query_id());
|
|
|
|
if (!query_info.input_data().empty())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("input_data: ").append(std::to_string(query_info.input_data().size())).append(" bytes");
|
2020-11-03 11:47:34 +00:00
|
|
|
if (query_info.external_tables_size())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("external tables: ").append(std::to_string(query_info.external_tables_size()));
|
2020-11-01 21:23:27 +00:00
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Generates a description of a result.
|
|
|
|
/// This description is used for logging only.
|
|
|
|
String getResultDescription(const GRPCResult & result)
|
|
|
|
{
|
|
|
|
String str;
|
|
|
|
if (!result.output().empty())
|
|
|
|
str.append("output: ").append(std::to_string(result.output().size())).append(" bytes");
|
|
|
|
if (!result.totals().empty())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("totals");
|
|
|
|
if (!result.extremes().empty())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("extremes");
|
|
|
|
if (result.has_progress())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("progress");
|
2020-10-24 00:37:57 +00:00
|
|
|
if (result.logs_size())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("logs: ").append(std::to_string(result.logs_size())).append(" entries");
|
2020-11-02 00:47:43 +00:00
|
|
|
if (result.cancelled())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("cancelled");
|
2020-11-01 21:23:27 +00:00
|
|
|
if (result.has_exception())
|
|
|
|
str.append(str.empty() ? "" : ", ").append("exception");
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
2020-11-01 20:43:50 +00:00
|
|
|
using CompletionCallback = std::function<void(bool)>;
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Requests a connection and provides low-level interface for reading and writing.
///
/// One responder corresponds to one gRPC call. Subclasses (one per call type)
/// implement start/read/write/writeAndFinish on top of the matching
/// grpc::ServerAsync* stream class. All completion is reported through
/// CompletionCallback functors which are stashed in a map so that gRPC's
/// `void *` tag mechanism can refer to them (see getCallbackPtr).
class BaseResponder
{
public:
    virtual ~BaseResponder() = default;

    /// Registers this responder to receive the next incoming call of its type.
    /// `callback` fires when a client actually connects.
    virtual void start(GRPCService & grpc_service,
        grpc::ServerCompletionQueue & new_call_queue,
        grpc::ServerCompletionQueue & notification_queue,
        const CompletionCallback & callback) = 0;

    /// Asynchronously reads the next QueryInfo message into `query_info_`.
    virtual void read(GRPCQueryInfo & query_info_, const CompletionCallback & callback) = 0;
    /// Asynchronously writes an intermediate Result message (streaming-output calls only).
    virtual void write(const GRPCResult & result, const CompletionCallback & callback) = 0;
    /// Asynchronously writes the final Result and completes the call with `status`.
    virtual void writeAndFinish(const GRPCResult & result, const grpc::Status & status, const CompletionCallback & callback) = 0;

    /// Parses the peer address out of gRPC's peer string (e.g. "ipv4:1.2.3.4:56").
    /// Everything after the first ':' is handed to Poco::Net::SocketAddress.
    Poco::Net::SocketAddress getClientAddress() const
    {
        String peer = grpc_context.peer();
        return Poco::Net::SocketAddress{peer.substr(peer.find(':') + 1)};
    }

    /// Returns the value of a client metadata entry, or nullopt if absent.
    std::optional<String> getClientHeader(const String & key) const
    {
        const auto & client_metadata = grpc_context.client_metadata();
        auto it = client_metadata.find(key);
        if (it != client_metadata.end())
            return String{it->second.data(), it->second.size()};
        return std::nullopt;
    }

    /// Applies the chosen transport compression to this call's server context.
    void setTransportCompression(const TransportCompression & transport_compression)
    {
        grpc_context.set_compression_algorithm(transport_compression.algorithm);
        grpc_context.set_compression_level(transport_compression.level);
    }

protected:
    CompletionCallback * getCallbackPtr(const CompletionCallback & callback)
    {
        /// It would be better to pass callbacks to gRPC calls.
        /// However gRPC calls can be tagged with `void *` tags only.
        /// The map `callbacks` here is used to keep callbacks until they're called.
        std::lock_guard lock{mutex};
        size_t callback_id = next_callback_id++;
        auto & callback_in_map = callbacks[callback_id];
        /// The wrapper copies the user callback out of the map (under the lock)
        /// and erases the entry before invoking it, so the map never grows
        /// unboundedly and the entry's storage isn't touched after invocation.
        callback_in_map = [this, callback, callback_id](bool ok)
        {
            CompletionCallback callback_to_call;
            {
                std::lock_guard lock2{mutex};
                callback_to_call = callback;
                callbacks.erase(callback_id);
            }
            callback_to_call(ok);
        };
        return &callback_in_map;
    }

    grpc::ServerContext grpc_context;

private:
    /// NOTE(review): this base-class reader_writer appears unused here — each
    /// subclass declares its own stream object; verify whether it can be removed.
    grpc::ServerAsyncReaderWriter<GRPCResult, GRPCQueryInfo> reader_writer{&grpc_context};
    std::unordered_map<size_t, CompletionCallback> callbacks;  /// Pending callbacks keyed by id.
    size_t next_callback_id = 0;
    std::mutex mutex;  /// Guards `callbacks` and `next_callback_id`.
};
|
2020-09-21 22:12:55 +00:00
|
|
|
|
2020-10-24 22:03:49 +00:00
|
|
|
/// Identifies which of the four gRPC service methods a call came through.
enum CallType
{
    CALL_SIMPLE,             /// ExecuteQuery() call
    CALL_WITH_STREAM_INPUT,  /// ExecuteQueryWithStreamInput() call
    CALL_WITH_STREAM_OUTPUT, /// ExecuteQueryWithStreamOutput() call
    CALL_WITH_STREAM_IO,     /// ExecuteQueryWithStreamIO() call
    CALL_MAX,                /// Number of call types; not a valid call type itself.
};
|
|
|
|
|
|
|
|
/// Returns a human-readable name of a call type, for logging.
const char * getCallName(CallType call_type)
{
    if (call_type == CALL_SIMPLE)
        return "ExecuteQuery()";
    if (call_type == CALL_WITH_STREAM_INPUT)
        return "ExecuteQueryWithStreamInput()";
    if (call_type == CALL_WITH_STREAM_OUTPUT)
        return "ExecuteQueryWithStreamOutput()";
    if (call_type == CALL_WITH_STREAM_IO)
        return "ExecuteQueryWithStreamIO()";
    UNREACHABLE();
}
|
|
|
|
|
|
|
|
/// Whether the client streams multiple QueryInfo messages for this call type.
bool isInputStreaming(CallType call_type)
{
    switch (call_type)
    {
        case CALL_WITH_STREAM_INPUT:
        case CALL_WITH_STREAM_IO:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
|
|
|
/// Whether the server streams multiple Result messages for this call type.
bool isOutputStreaming(CallType call_type)
{
    switch (call_type)
    {
        case CALL_WITH_STREAM_OUTPUT:
        case CALL_WITH_STREAM_IO:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
|
|
|
/// Responder is specialized below for each of the four call types.
template <enum CallType call_type>
class Responder;
|
|
|
|
|
|
|
|
template<>
|
|
|
|
class Responder<CALL_SIMPLE> : public BaseResponder
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void start(GRPCService & grpc_service,
|
|
|
|
grpc::ServerCompletionQueue & new_call_queue,
|
|
|
|
grpc::ServerCompletionQueue & notification_queue,
|
|
|
|
const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
grpc_service.RequestExecuteQuery(&grpc_context, &query_info.emplace(), &response_writer, &new_call_queue, ¬ification_queue, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void read(GRPCQueryInfo & query_info_, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
if (!query_info.has_value())
|
|
|
|
callback(false);
|
|
|
|
query_info_ = std::move(query_info).value();
|
|
|
|
query_info.reset();
|
|
|
|
callback(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
void write(const GRPCResult &, const CompletionCallback &) override
|
|
|
|
{
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Responder<CALL_SIMPLE>::write() should not be called");
|
2020-10-24 22:03:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void writeAndFinish(const GRPCResult & result, const grpc::Status & status, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
response_writer.Finish(result, status, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
grpc::ServerAsyncResponseWriter<GRPCResult> response_writer{&grpc_context};
|
|
|
|
std::optional<GRPCQueryInfo> query_info;
|
|
|
|
};
|
|
|
|
|
|
|
|
template<>
|
|
|
|
class Responder<CALL_WITH_STREAM_INPUT> : public BaseResponder
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void start(GRPCService & grpc_service,
|
|
|
|
grpc::ServerCompletionQueue & new_call_queue,
|
|
|
|
grpc::ServerCompletionQueue & notification_queue,
|
|
|
|
const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
grpc_service.RequestExecuteQueryWithStreamInput(&grpc_context, &reader, &new_call_queue, ¬ification_queue, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void read(GRPCQueryInfo & query_info_, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
reader.Read(&query_info_, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void write(const GRPCResult &, const CompletionCallback &) override
|
|
|
|
{
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Responder<CALL_WITH_STREAM_INPUT>::write() should not be called");
|
2020-10-24 22:03:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void writeAndFinish(const GRPCResult & result, const grpc::Status & status, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
reader.Finish(result, status, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
grpc::ServerAsyncReader<GRPCResult, GRPCQueryInfo> reader{&grpc_context};
|
|
|
|
};
|
|
|
|
|
|
|
|
template<>
|
|
|
|
class Responder<CALL_WITH_STREAM_OUTPUT> : public BaseResponder
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void start(GRPCService & grpc_service,
|
|
|
|
grpc::ServerCompletionQueue & new_call_queue,
|
|
|
|
grpc::ServerCompletionQueue & notification_queue,
|
|
|
|
const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
grpc_service.RequestExecuteQueryWithStreamOutput(&grpc_context, &query_info.emplace(), &writer, &new_call_queue, ¬ification_queue, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void read(GRPCQueryInfo & query_info_, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
if (!query_info.has_value())
|
|
|
|
callback(false);
|
|
|
|
query_info_ = std::move(query_info).value();
|
|
|
|
query_info.reset();
|
|
|
|
callback(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
void write(const GRPCResult & result, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
writer.Write(result, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void writeAndFinish(const GRPCResult & result, const grpc::Status & status, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
writer.WriteAndFinish(result, {}, status, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
grpc::ServerAsyncWriter<GRPCResult> writer{&grpc_context};
|
|
|
|
std::optional<GRPCQueryInfo> query_info;
|
|
|
|
};
|
|
|
|
|
|
|
|
template<>
|
|
|
|
class Responder<CALL_WITH_STREAM_IO> : public BaseResponder
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
void start(GRPCService & grpc_service,
|
|
|
|
grpc::ServerCompletionQueue & new_call_queue,
|
|
|
|
grpc::ServerCompletionQueue & notification_queue,
|
|
|
|
const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
grpc_service.RequestExecuteQueryWithStreamIO(&grpc_context, &reader_writer, &new_call_queue, ¬ification_queue, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void read(GRPCQueryInfo & query_info_, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
reader_writer.Read(&query_info_, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void write(const GRPCResult & result, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
reader_writer.Write(result, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
void writeAndFinish(const GRPCResult & result, const grpc::Status & status, const CompletionCallback & callback) override
|
|
|
|
{
|
|
|
|
reader_writer.WriteAndFinish(result, {}, status, getCallbackPtr(callback));
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
grpc::ServerAsyncReaderWriter<GRPCResult, GRPCQueryInfo> reader_writer{&grpc_context};
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Factory: creates the responder specialization matching a call type.
std::unique_ptr<BaseResponder> makeResponder(CallType call_type)
{
    if (call_type == CALL_SIMPLE)
        return std::make_unique<Responder<CALL_SIMPLE>>();
    if (call_type == CALL_WITH_STREAM_INPUT)
        return std::make_unique<Responder<CALL_WITH_STREAM_INPUT>>();
    if (call_type == CALL_WITH_STREAM_OUTPUT)
        return std::make_unique<Responder<CALL_WITH_STREAM_OUTPUT>>();
    if (call_type == CALL_WITH_STREAM_IO)
        return std::make_unique<Responder<CALL_WITH_STREAM_IO>>();
    UNREACHABLE();
}
|
|
|
|
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-11-03 21:33:23 +00:00
|
|
|
/// Implementation of ReadBuffer, which just calls a callback.
/// Each call to the callback yields the next chunk of input as a
/// (pointer, size) pair; a zero size signals end of data.
class ReadBufferFromCallback : public ReadBuffer
{
public:
    explicit ReadBufferFromCallback(const std::function<std::pair<const void *, size_t>(void)> & callback_)
        : ReadBuffer(nullptr, 0), callback(callback_) {}

private:
    /// Fetches the next chunk; returns false when the callback reports size 0.
    bool nextImpl() override
    {
        const void * new_pos;
        size_t new_size;
        std::tie(new_pos, new_size) = callback();
        if (!new_size)
            return false;
        /// The buffer does not copy the chunk — presumably the callback keeps it
        /// alive until the next invocation; verify against the callers.
        BufferBase::set(static_cast<BufferBase::Position>(const_cast<void *>(new_pos)), new_size, 0);
        return true;
    }

    std::function<std::pair<const void *, size_t>(void)> callback;
};
|
|
|
|
|
|
|
|
|
2021-07-31 13:58:54 +00:00
|
|
|
/// A boolean state protected by mutex able to wait until other thread sets it to a specific value.
class BoolState
{
public:
    explicit BoolState(bool initial_value) : value(initial_value) {}

    /// Returns the current value (thread-safe).
    bool get() const
    {
        std::lock_guard lock{mutex};
        return value;
    }

    /// Stores a new value; wakes up all waiters only if the value actually changed.
    void set(bool new_value)
    {
        {
            std::lock_guard lock{mutex};
            if (value == new_value)
                return;
            value = new_value;
        }
        changed.notify_all();
    }

    /// Blocks the calling thread until the value equals `wanted_value`.
    void wait(bool wanted_value) const
    {
        std::unique_lock lock{mutex};
        while (value != wanted_value)
            changed.wait(lock);
    }

private:
    bool value;
    mutable std::mutex mutex;
    mutable std::condition_variable changed;
};
|
|
|
|
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Handles a connection after a responder is started (i.e. after getting a new call).
///
/// Owns the whole lifecycle of one gRPC query: receiving the query info,
/// executing the query, streaming input/output, progress, logs, and errors.
/// Runs the work on a dedicated thread (`call_thread`) started by start().
class Call
{
public:
    Call(CallType call_type_, std::unique_ptr<BaseResponder> responder_, IServer & iserver_, Poco::Logger * log_);
    ~Call();

    /// Launches processing of the call; `on_finish_call_callback` is invoked when done.
    void start(const std::function<void(void)> & on_finish_call_callback);

private:
    /// Thread body: drives receiveQuery/executeQuery and error handling.
    void run();

    void receiveQuery();
    void executeQuery();

    /// Input side: feeding data from the client into the query pipeline.
    void processInput();
    void initializePipeline(const Block & header);
    void createExternalTables();

    /// Output side: pulling blocks from the pipeline and sending results.
    void generateOutput();

    void finishQuery();
    void onException(const Exception & exception);
    void onFatalError();
    void releaseQueryIDAndSessionID();
    void close();

    /// Low-level query-info reading helpers (used from both directions).
    void readQueryInfo();
    void throwIfFailedToReadQueryInfo();
    bool isQueryCancelled();

    /// Helpers filling in the various fields of `result` before sending.
    void addQueryDetailsToResult();
    void addOutputFormatToResult();
    void addOutputColumnsNamesAndTypesToResult(const Block & headers);
    void addProgressToResult();
    void addTotalsToResult(const Block & totals);
    void addExtremesToResult(const Block & extremes);
    void addProfileInfoToResult(const ProfileInfo & info);
    void addLogsToResult();
    void sendResult();
    void throwIfFailedToSendResult();
    void sendException(const Exception & exception);

    const CallType call_type;
    std::unique_ptr<BaseResponder> responder;
    IServer & iserver;
    Poco::Logger * log = nullptr;

    std::optional<Session> session;
    ContextMutablePtr query_context;
    std::optional<CurrentThread::QueryScope> query_scope;
    OpenTelemetry::TracingContextHolderPtr thread_trace_context;
    String query_text;
    ASTPtr ast;
    ASTInsertQuery * insert_query = nullptr;  /// Non-null only for INSERT queries (points into `ast`).
    String input_format;
    String input_data_delimiter;
    CompressionMethod input_compression_method = CompressionMethod::None;
    PODArray<char> output;  /// Accumulates the serialized output before it is sent.
    String output_format;
    bool send_output_columns_names_and_types = false;
    CompressionMethod output_compression_method = CompressionMethod::None;
    int output_compression_level = 0;

    /// Delay between progress/log notifications, in microseconds.
    uint64_t interactive_delay = 100000;
    bool send_exception_with_stacktrace = true;
    bool input_function_is_used = false;

    BlockIO io;
    Progress progress;
    InternalTextLogsQueuePtr logs_queue;

    GRPCQueryInfo query_info; /// We reuse the same messages multiple times.
    GRPCResult result;

    bool initial_query_info_read = false;
    bool finalize = false;
    bool responder_finished = false;
    bool cancelled = false;

    std::unique_ptr<ReadBuffer> read_buffer;
    std::unique_ptr<WriteBuffer> write_buffer;
    /// Non-owning views into parts of the `write_buffer` chain.
    WriteBufferFromVector<PODArray<char>> * nested_write_buffer = nullptr;
    WriteBuffer * compressing_write_buffer = nullptr;
    std::unique_ptr<QueryPipeline> pipeline;
    std::unique_ptr<PullingPipelineExecutor> pipeline_executor;
    std::shared_ptr<IOutputFormat> output_format_processor;
    bool need_input_data_from_insert_query = true;
    bool need_input_data_from_query_info = true;
    bool need_input_data_delimiter = false;

    /// Timing statistics for the final log message.
    Stopwatch query_time;
    UInt64 waited_for_client_reading = 0;
    UInt64 waited_for_client_writing = 0;

    /// The following fields are accessed both from call_thread and queue_thread.
    BoolState reading_query_info{false};
    std::atomic<bool> failed_to_read_query_info = false;
    GRPCQueryInfo next_query_info_while_reading;
    std::atomic<bool> want_to_cancel = false;
    std::atomic<bool> check_query_info_contains_cancel_only = false;
    BoolState sending_result{false};
    std::atomic<bool> failed_to_send_result = false;

    ThreadFromGlobalPool call_thread;
};
|
2020-09-21 22:12:55 +00:00
|
|
|
|
2020-10-24 22:03:49 +00:00
|
|
|
/// Constructs a handler for a single gRPC call.
/// Takes ownership of the responder (the object used to read from / write to the client);
/// no work is done here — the query lifecycle starts later via start().
Call::Call(CallType call_type_, std::unique_ptr<BaseResponder> responder_, IServer & iserver_, Poco::Logger * log_)
    : call_type(call_type_), responder(std::move(responder_)), iserver(iserver_), log(log_)
{
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Destructor: waits for the worker thread started by start(), if any.
/// The thread's lambda captures `this`, so it must finish before destruction completes.
Call::~Call()
{
    if (!call_thread.joinable())
        return;
    call_thread.join();
}
|
|
|
|
|
|
|
|
void Call::start(const std::function<void(void)> & on_finish_call_callback)
|
|
|
|
{
|
|
|
|
auto runner_function = [this, on_finish_call_callback]
|
2020-10-11 02:19:01 +00:00
|
|
|
{
|
2020-10-15 00:45:13 +00:00
|
|
|
try
|
2020-10-11 02:19:01 +00:00
|
|
|
{
|
2020-10-15 00:45:13 +00:00
|
|
|
run();
|
|
|
|
}
|
|
|
|
catch (...)
|
|
|
|
{
|
|
|
|
tryLogCurrentException("GRPCServer");
|
2020-10-11 02:19:01 +00:00
|
|
|
}
|
2020-10-15 00:45:13 +00:00
|
|
|
on_finish_call_callback();
|
|
|
|
};
|
|
|
|
call_thread = ThreadFromGlobalPool(runner_function);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Thread body of the call: drives the full query lifecycle and converts any
/// thrown exception into an error response for the client via onException().
/// Catch clauses are ordered most-specific first; Poco and std exceptions are
/// wrapped into DB::Exception so onException() handles them uniformly.
void Call::run()
{
    try
    {
        setThreadName("GRPCServerCall");
        receiveQuery();
        executeQuery();
        processInput();
        generateOutput();
        finishQuery();
    }
    catch (Exception & exception)
    {
        onException(exception);
    }
    catch (Poco::Exception & exception)
    {
        onException(Exception{Exception::CreateFromPocoTag{}, exception});
    }
    catch (std::exception & exception)
    {
        onException(Exception{Exception::CreateFromSTDTag{}, exception});
    }
}
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Reads the initial QueryInfo message from the client into `query_info`.
/// Throws INVALID_GRPC_QUERY_INFO if the first message already asks for
/// cancellation — there is no query to cancel yet at this point.
void Call::receiveQuery()
{
    LOG_INFO(log, "Handling call {}", getCallName(call_type));

    readQueryInfo();

    if (query_info.cancel())
        throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "Initial query info cannot set the 'cancel' field");

    LOG_DEBUG(log, "Received initial QueryInfo: {}", getQueryDescription(query_info));
}
|
2020-09-21 22:12:55 +00:00
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Sets up the session and query context from the initial QueryInfo, parses the
/// query text, chooses input/output formats and compression, installs the
/// callbacks used for external tables and the input() table function, and
/// finally starts query execution (filling `io`). The order of steps matters:
/// authentication -> (optional) session context -> query context -> settings ->
/// tracing/logs -> parse -> callbacks -> execute.
void Call::executeQuery()
{
    /// Retrieve user credentials.
    std::string user = query_info.user_name();
    std::string password = query_info.password();
    std::string quota_key = query_info.quota();
    Poco::Net::SocketAddress user_address = responder->getClientAddress();

    /// An empty user name means the default user with an empty password.
    if (user.empty())
    {
        user = "default";
        password = "";
    }

    /// Authentication.
    session.emplace(iserver.context(), ClientInfo::Interface::GRPC);
    session->authenticate(user, password, user_address);
    session->getClientInfo().quota_key = quota_key;

    /// Copied so the trace context can be filled in before the copy is moved
    /// into the query context below.
    ClientInfo client_info = session->getClientInfo();

    /// Parse the OpenTelemetry traceparent header.
    auto traceparent = responder->getClientHeader("traceparent");
    if (traceparent)
    {
        String error;
        if (!client_info.client_trace_context.parseTraceparentHeader(traceparent.value(), error))
        {
            throw Exception(ErrorCodes::BAD_REQUEST_PARAMETER,
                "Failed to parse OpenTelemetry traceparent header '{}': {}",
                traceparent.value(), error);
        }
        auto tracestate = responder->getClientHeader("tracestate");
        client_info.client_trace_context.tracestate = tracestate.value_or("");
    }

    /// The user could specify session identifier and session timeout.
    /// It allows to modify settings, create temporary tables and reuse them in subsequent requests.
    if (!query_info.session_id().empty())
    {
        session->makeSessionContext(
            query_info.session_id(), getSessionTimeout(query_info, iserver.config()), query_info.session_check());
    }

    query_context = session->makeQueryContext(std::move(client_info));

    /// Prepare settings.
    SettingsChanges settings_changes;
    for (const auto & [key, value] : query_info.settings())
    {
        settings_changes.push_back({key, value});
    }
    /// Constraints are checked before applying so a forbidden change rejects the query.
    query_context->checkSettingsConstraints(settings_changes);
    query_context->applySettingsChanges(settings_changes);

    query_context->setCurrentQueryId(query_info.query_id());
    query_scope.emplace(query_context);

    /// Set up tracing context for this query on current thread
    thread_trace_context = std::make_unique<OpenTelemetry::TracingContextHolder>("GRPCServer",
        query_context->getClientInfo().client_trace_context,
        query_context->getSettingsRef(),
        query_context->getOpenTelemetrySpanLog());

    /// Prepare for sending exceptions and logs.
    const Settings & settings = query_context->getSettingsRef();
    send_exception_with_stacktrace = settings.calculate_text_stack_trace;
    const auto client_logs_level = settings.send_logs_level;
    if (client_logs_level != LogsLevel::none)
    {
        logs_queue = std::make_shared<InternalTextLogsQueue>();
        logs_queue->max_priority = Poco::Logger::parseLevel(client_logs_level.toString());
        logs_queue->setSourceRegexp(settings.send_logs_source_regexp);
        CurrentThread::attachInternalTextLogsQueue(logs_queue, client_logs_level);
        CurrentThread::setFatalErrorCallback([this]{ onFatalError(); });
    }

    /// Set the current database if specified.
    if (!query_info.database().empty())
        query_context->setCurrentDatabase(query_info.database());

    /// Apply transport compression for this call.
    if (auto transport_compression = TransportCompression::fromQueryInfo(query_info))
        responder->setTransportCompression(*transport_compression);

    /// The interactive delay will be used to show progress.
    interactive_delay = settings.interactive_delay;
    query_context->setProgressCallback([this](const Progress & value) { return progress.incrementPiecewiseAtomically(value); });

    /// Parse the query. The query text is moved out of query_info because the
    /// same message object is reused for subsequent reads.
    query_text = std::move(*(query_info.mutable_query()));
    const char * begin = query_text.data();
    const char * end = begin + query_text.size();
    ParserQuery parser(end, settings.allow_settings_after_format_in_insert);
    ast = parseQuery(parser, begin, end, "", settings.max_query_size, settings.max_parser_depth);

    /// Choose input format.
    insert_query = ast->as<ASTInsertQuery>();
    if (insert_query)
    {
        input_format = insert_query->format;
        if (input_format.empty())
            input_format = "Values";
    }

    input_data_delimiter = query_info.input_data_delimiter();

    /// Choose output format. A FORMAT clause in the query itself takes
    /// precedence over the output_format field of QueryInfo.
    query_context->setDefaultFormat(query_info.output_format());
    if (const auto * ast_query_with_output = dynamic_cast<const ASTQueryWithOutput *>(ast.get());
        ast_query_with_output && ast_query_with_output->format)
    {
        output_format = getIdentifierName(ast_query_with_output->format);
    }
    if (output_format.empty())
        output_format = query_context->getDefaultFormat();

    send_output_columns_names_and_types = query_info.send_output_columns();

    /// Choose compression. The obsolete single `compression_type` field is used
    /// as a fallback for both directions for backward compatibility.
    String input_compression_method_str = query_info.input_compression_type();
    if (input_compression_method_str.empty())
        input_compression_method_str = query_info.obsolete_compression_type();
    input_compression_method = chooseCompressionMethod("", input_compression_method_str);

    String output_compression_method_str = query_info.output_compression_type();
    if (output_compression_method_str.empty())
        output_compression_method_str = query_info.obsolete_compression_type();
    output_compression_method = chooseCompressionMethod("", output_compression_method_str);
    output_compression_level = query_info.output_compression_level();

    /// Set callback to create and fill external tables
    query_context->setExternalTablesInitializer([this] (ContextPtr context)
    {
        if (context != query_context)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected context in external tables initializer");
        createExternalTables();
    });

    /// Set callbacks to execute function input().
    query_context->setInputInitializer([this] (ContextPtr context, const StoragePtr & input_storage)
    {
        if (context != query_context)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected context in Input initializer");
        input_function_is_used = true;
        initializePipeline(input_storage->getInMemoryMetadataPtr()->getSampleBlock());
    });

    query_context->setInputBlocksReaderCallback([this](ContextPtr context) -> Block
    {
        if (context != query_context)
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected context in InputBlocksReader");

        /// Skip empty blocks; an empty block after pull() returns false means end of data.
        Block block;
        while (!block && pipeline_executor->pull(block));

        return block;
    });

    /// Start executing the query. For INSERT ... VALUES the inline data after
    /// the query text is excluded here and fed to the pipeline separately.
    const auto * query_end = end;
    if (insert_query && insert_query->data)
    {
        query_end = insert_query->data;
    }
    String query(begin, query_end);
    io = ::DB::executeQuery(true, query, query_context);
}
|
|
|
|
|
|
|
|
/// Feeds input data into an INSERT pipeline. Does nothing unless the pipeline
/// is in pushing mode. Throws NO_DATA_TO_INSERT when an INSERT has no data and
/// the `throw_if_no_data_to_insert` setting is enabled.
void Call::processInput()
{
    if (!io.pipeline.pushing())
        return;

    /// Data may come inline after the INSERT query, in the input_data field,
    /// or in subsequent streamed QueryInfo messages.
    bool has_data_to_insert = (insert_query && insert_query->data)
        || !query_info.input_data().empty() || query_info.next_query_info();
    if (!has_data_to_insert)
    {
        if (!insert_query)
            throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "Query requires data to insert, but it is not an INSERT query");
        else
        {
            const auto & settings = query_context->getSettingsRef();
            if (settings.throw_if_no_data_to_insert)
                throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert");
            else
                return;
        }
    }

    /// This is significant, because parallel parsing may be used.
    /// So we mustn't touch the input stream from other thread.
    initializePipeline(io.pipeline.getHeader());

    PushingPipelineExecutor executor(io.pipeline);
    executor.start();

    /// Pull parsed blocks from the input pipeline and push them into the INSERT pipeline.
    Block block;
    while (pipeline_executor->pull(block))
    {
        if (block)
            executor.push(block);
    }

    executor.finish();
}
|
|
|
|
|
2022-05-09 19:13:02 +00:00
|
|
|
/// Builds the input pipeline (`pipeline` / `pipeline_executor`) that parses raw
/// client bytes into blocks with the given header. The underlying read buffer
/// pulls data on demand from three sources, in order: inline INSERT data,
/// the input_data of the current QueryInfo, and further streamed QueryInfo
/// messages. Must be called at most once (asserts `read_buffer`/`pipeline` are empty).
void Call::initializePipeline(const Block & header)
{
    assert(!read_buffer);
    read_buffer = std::make_unique<ReadBufferFromCallback>([this]() -> std::pair<const void *, size_t>
    {
        /// First chunk: inline data following the INSERT statement, if any.
        if (need_input_data_from_insert_query)
        {
            need_input_data_from_insert_query = false;
            if (insert_query && insert_query->data && (insert_query->data != insert_query->end))
            {
                need_input_data_delimiter = !input_data_delimiter.empty();
                return {insert_query->data, insert_query->end - insert_query->data};
            }
        }

        while (true)
        {
            if (need_input_data_from_query_info)
            {
                /// Emit the delimiter between consecutive data chunks before the chunk itself.
                if (need_input_data_delimiter && !query_info.input_data().empty())
                {
                    need_input_data_delimiter = false;
                    return {input_data_delimiter.data(), input_data_delimiter.size()};
                }
                need_input_data_from_query_info = false;
                if (!query_info.input_data().empty())
                {
                    need_input_data_delimiter = !input_data_delimiter.empty();
                    return {query_info.input_data().data(), query_info.input_data().size()};
                }
            }

            if (!query_info.next_query_info())
                break;

            if (!isInputStreaming(call_type))
                throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "next_query_info is allowed to be set only for streaming input");

            /// Read the next streamed QueryInfo; it may carry only extra input data (or cancel).
            readQueryInfo();
            if (!query_info.query().empty() || !query_info.query_id().empty() || !query_info.settings().empty()
                || !query_info.database().empty() || !query_info.input_data_delimiter().empty() || !query_info.output_format().empty()
                || query_info.external_tables_size() || !query_info.user_name().empty() || !query_info.password().empty()
                || !query_info.quota().empty() || !query_info.session_id().empty())
            {
                throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO,
                    "Extra query infos can be used only to add more input data. "
                    "Only the following fields can be set: input_data, next_query_info, cancel");
            }

            if (isQueryCancelled())
                break;

            LOG_DEBUG(log, "Received extra QueryInfo: input_data: {} bytes", query_info.input_data().size());
            need_input_data_from_query_info = true;
        }

        return {nullptr, 0}; /// no more input data
    });

    /// Decompress the incoming byte stream if the client requested it.
    read_buffer = wrapReadBufferWithCompressionMethod(std::move(read_buffer), input_compression_method);

    assert(!pipeline);
    auto source = query_context->getInputFormat(
        input_format, *read_buffer, header, query_context->getSettings().max_insert_block_size);

    pipeline = std::make_unique<QueryPipeline>(std::move(source));
    pipeline_executor = std::make_unique<PullingPipelineExecutor>(*pipeline);
}
|
2020-09-21 22:12:55 +00:00
|
|
|
|
2020-11-03 11:47:34 +00:00
|
|
|
/// Creates (or finds) the temporary tables described by the external_tables
/// field of QueryInfo and fills them with the supplied data. For streaming
/// input calls, keeps reading further QueryInfo messages that may carry more
/// external tables, until input data arrives or the stream ends.
void Call::createExternalTables()
{
    while (true)
    {
        for (const auto & external_table : query_info.external_tables())
        {
            String name = external_table.name();
            if (name.empty())
                name = "_data";
            auto temporary_id = StorageID::createEmpty();
            temporary_id.table_name = name;

            /// If such a table does not exist, create it.
            StoragePtr storage;
            if (auto resolved = query_context->tryResolveStorageID(temporary_id, Context::ResolveExternal))
            {
                storage = DatabaseCatalog::instance().getTable(resolved, query_context);
            }
            else
            {
                /// Build the column list from the message; unnamed columns become _1, _2, ...
                NamesAndTypesList columns;
                for (size_t column_idx : collections::range(external_table.columns_size()))
                {
                    /// TODO: consider changing protocol
                    const auto & name_and_type = external_table.columns(static_cast<int>(column_idx));
                    NameAndTypePair column;
                    column.name = name_and_type.name();
                    if (column.name.empty())
                        column.name = "_" + std::to_string(column_idx + 1);
                    column.type = DataTypeFactory::instance().get(name_and_type.type());
                    columns.emplace_back(std::move(column));
                }
                auto temporary_table = TemporaryTableHolder(query_context, ColumnsDescription{columns}, {});
                storage = temporary_table.getTable();
                query_context->addExternalTable(temporary_id.table_name, std::move(temporary_table));
            }

            if (!external_table.data().empty())
            {
                /// The data will be written directly to the table.
                auto metadata_snapshot = storage->getInMemoryMetadataPtr();
                auto sink = storage->write(ASTPtr(), metadata_snapshot, query_context);

                std::unique_ptr<ReadBuffer> buf = std::make_unique<ReadBufferFromMemory>(external_table.data().data(), external_table.data().size());
                buf = wrapReadBufferWithCompressionMethod(std::move(buf), chooseCompressionMethod("", external_table.compression_type()));

                String format = external_table.format();
                if (format.empty())
                    format = "TabSeparated";

                /// Per-table settings are applied on a copy of the query context
                /// so they don't leak into the rest of the query.
                ContextMutablePtr external_table_context = query_context;
                ContextMutablePtr temp_context;
                if (!external_table.settings().empty())
                {
                    temp_context = Context::createCopy(query_context);
                    external_table_context = temp_context;
                    SettingsChanges settings_changes;
                    for (const auto & [key, value] : external_table.settings())
                        settings_changes.push_back({key, value});
                    external_table_context->checkSettingsConstraints(settings_changes);
                    external_table_context->applySettingsChanges(settings_changes);
                }
                auto in = external_table_context->getInputFormat(
                    format, *buf, metadata_snapshot->getSampleBlock(),
                    external_table_context->getSettings().max_insert_block_size);

                /// input format -> storage sink -> discard outputs; run single-threaded.
                QueryPipelineBuilder cur_pipeline;
                cur_pipeline.init(Pipe(std::move(in)));
                cur_pipeline.addTransform(std::move(sink));
                cur_pipeline.setSinks([&](const Block & header, Pipe::StreamType)
                {
                    return std::make_shared<EmptySink>(header);
                });

                auto executor = cur_pipeline.execute();
                executor->execute(1);
            }
        }

        if (!query_info.input_data().empty())
        {
            /// External tables must be created before executing the query,
            /// so all external tables must be sent no later than any input data.
            break;
        }

        if (!query_info.next_query_info())
            break;

        if (!isInputStreaming(call_type))
            throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO, "next_query_info is allowed to be set only for streaming input");

        /// Read the next streamed QueryInfo; it may carry only more input data
        /// or more external tables (or cancel).
        readQueryInfo();
        if (!query_info.query().empty() || !query_info.query_id().empty() || !query_info.settings().empty()
            || !query_info.database().empty() || !query_info.input_data_delimiter().empty()
            || !query_info.output_format().empty() || !query_info.user_name().empty() || !query_info.password().empty()
            || !query_info.quota().empty() || !query_info.session_id().empty())
        {
            throw Exception(ErrorCodes::INVALID_GRPC_QUERY_INFO,
                "Extra query infos can be used only "
                "to add more data to input or more external tables. "
                "Only the following fields can be set: "
                "input_data, external_tables, next_query_info, cancel");
        }
        if (isQueryCancelled())
            break;
        LOG_DEBUG(log, "Received extra QueryInfo: external tables: {}", query_info.external_tables_size());
    }
}
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Streams query results back to the client. For pulling pipelines (SELECT-like
/// queries) blocks are pulled, formatted and sent incrementally, interleaved
/// with progress/log messages and cancellation checks; totals/extremes/profile
/// info are appended at the end. For completed pipelines only progress and logs
/// are streamed via a periodic cancel-check callback.
void Call::generateOutput()
{
    /// We add query_id and time_zone to the first result anyway.
    addQueryDetailsToResult();

    if (!io.pipeline.initialized() || io.pipeline.pushing())
        return;

    Block header;
    if (io.pipeline.pulling())
        header = io.pipeline.getHeader();

    if (output_compression_method != CompressionMethod::None)
        output.resize(DBMS_DEFAULT_BUFFER_SIZE); /// Must have enough space for compressed data.
    write_buffer = std::make_unique<WriteBufferFromVector<PODArray<char>>>(output);
    /// Kept as raw pointers so has_output() below can inspect both buffer levels.
    nested_write_buffer = static_cast<WriteBufferFromVector<PODArray<char>> *>(write_buffer.get());
    if (output_compression_method != CompressionMethod::None)
    {
        write_buffer = wrapWriteBufferWithCompressionMethod(std::move(write_buffer), output_compression_method, output_compression_level);
        compressing_write_buffer = write_buffer.get();
    }

    /// True if any formatted (or pending compressed) bytes are waiting to be sent.
    auto has_output = [&] { return (nested_write_buffer->position() != output.data()) || (compressing_write_buffer && compressing_write_buffer->offset()); };

    output_format_processor = query_context->getOutputFormat(output_format, *write_buffer, header);
    Stopwatch after_send_progress;

    /// Unless the input() function is used we are not going to receive input data anymore.
    if (!input_function_is_used)
        check_query_info_contains_cancel_only = true;

    if (io.pipeline.pulling())
    {
        auto executor = std::make_shared<PullingAsyncPipelineExecutor>(io.pipeline);
        /// Returns false (after cancelling the executor) once the client asked to cancel.
        auto check_for_cancel = [&]
        {
            if (isQueryCancelled())
            {
                executor->cancel();
                return false;
            }
            return true;
        };

        addOutputFormatToResult();
        addOutputColumnsNamesAndTypesToResult(header);

        Block block;
        while (check_for_cancel())
        {
            /// pull() with a timeout so cancellation/progress get a chance to run
            /// even while the query produces no data.
            if (!executor->pull(block, interactive_delay / 1000))
                break;

            throwIfFailedToSendResult();
            if (!check_for_cancel())
                break;

            if (block && !io.null_format)
                output_format_processor->write(materializeBlock(block));

            if (after_send_progress.elapsedMicroseconds() >= interactive_delay)
            {
                addProgressToResult();
                after_send_progress.restart();
            }

            addLogsToResult();

            if (has_output() || result.has_progress() || result.logs_size())
                sendResult();

            throwIfFailedToSendResult();
            if (!check_for_cancel())
                break;
        }

        if (!isQueryCancelled())
        {
            addTotalsToResult(executor->getTotalsBlock());
            addExtremesToResult(executor->getExtremesBlock());
            addProfileInfoToResult(executor->getProfileInfo());
        }
    }
    else
    {
        auto executor = std::make_shared<CompletedPipelineExecutor>(io.pipeline);
        /// Invoked periodically by the executor; returning true cancels execution.
        auto callback = [&]() -> bool
        {
            throwIfFailedToSendResult();
            addProgressToResult();
            addLogsToResult();

            if (has_output() || result.has_progress() || result.logs_size())
                sendResult();

            throwIfFailedToSendResult();

            return isQueryCancelled();
        };
        executor->setCancelCallback(std::move(callback), interactive_delay / 1000);
        executor->execute();
    }

    output_format_processor->finalize();
}
|
|
|
|
|
|
|
|
/// Completes a successful query: flushes final progress and logs, releases the
/// query ID / session (must happen before the final result is sent — see
/// releaseQueryIDAndSessionID), sends the last result message and closes the call.
void Call::finishQuery()
{
    finalize = true;
    io.onFinish();
    addProgressToResult();
    query_scope->logPeakMemoryUsage();
    addLogsToResult();
    releaseQueryIDAndSessionID();
    sendResult();
    close();

    LOG_INFO(
        log,
        "Finished call {} in {} secs. (including reading by client: {}, writing by client: {})",
        getCallName(call_type),
        query_time.elapsedSeconds(),
        static_cast<double>(waited_for_client_reading) / 1000000000ULL,
        static_cast<double>(waited_for_client_writing) / 1000000000ULL);
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Handles an exception thrown while processing the call: logs it, and — if the
/// responder is still usable — tries (best-effort) to deliver logs and the
/// exception itself to the client before closing the call.
void Call::onException(const Exception & exception)
{
    io.onException();

    LOG_ERROR(log, getExceptionMessageAndPattern(exception, /* with_stacktrace */ true));

    /// gRPC doesn't allow writing to a finished responder, so only attempt
    /// client notification if the responder is still open.
    if (responder && !responder_finished)
    {
        try
        {
            /// Try to send logs to client, but it could be risky too.
            addLogsToResult();
        }
        catch (...)
        {
            LOG_WARNING(log, "Couldn't send logs to client");
        }

        /// Release IDs before sending the final (exception) message, for the same
        /// reason as in finishQuery(): the client may immediately reuse them.
        releaseQueryIDAndSessionID();

        try
        {
            sendException(exception);
        }
        catch (...)
        {
            LOG_WARNING(log, "Couldn't send exception information to the client");
        }
    }

    close();
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-10-24 00:37:57 +00:00
|
|
|
/// Called on a fatal (process-level) error. Makes a last best-effort attempt to
/// push a "FatalError" exception plus pending logs to the client. All failures
/// are deliberately swallowed: nothing more can be done at this point.
void Call::onFatalError()
{
    if (responder && !responder_finished)
    {
        try
        {
            result.mutable_exception()->set_name("FatalError");
            addLogsToResult();
            sendResult();
        }
        catch (...)
        {
            /// Intentionally ignored: we're already handling a fatal error.
        }
    }
}
|
|
|
|
|
2021-10-10 14:32:01 +00:00
|
|
|
/// Frees the query ID and session ID held by this call so the client can reuse them.
void Call::releaseQueryIDAndSessionID()
{
    /// releaseQueryIDAndSessionID() should be called before sending the final result to the client
    /// because the client may decide to send another query with the same query ID or session ID
    /// immediately after it receives our final result, and it's prohibited to have
    /// two queries executed at the same time with the same query ID or session ID.
    io.process_list_entry.reset();
    if (query_context)
        query_context->setProcessListElement(nullptr);
    if (session)
        session->releaseSessionID();
}
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Releases all per-call resources. The reset order is deliberate: the responder
/// first (no more gRPC I/O), then executors/pipeline, then formats and buffers
/// they write into, and finally query/session state.
void Call::close()
{
    responder.reset();
    pipeline_executor.reset();
    pipeline.reset();
    output_format_processor.reset();
    read_buffer.reset();
    write_buffer.reset();
    /// Raw pointers into the buffer chain owned by `write_buffer` — just null them.
    nested_write_buffer = nullptr;
    compressing_write_buffer = nullptr;
    io = {};
    query_scope.reset();
    query_context.reset();
    thread_trace_context.reset();
    session.reset();
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-11-01 20:43:50 +00:00
|
|
|
/// Reads the next QueryInfo message from the client into `query_info`.
/// The read itself is asynchronous: the completion callback runs on queue_thread,
/// so this function only starts a read (if none is in flight) and waits for it.
/// For input-streaming calls it also kicks off the read of the following message.
void Call::readQueryInfo()
{
    auto start_reading = [&]
    {
        reading_query_info.set(true);
        responder->read(next_query_info_while_reading, [this](bool ok)
        {
            /// Called on queue_thread.
            if (ok)
            {
                const auto & nqi = next_query_info_while_reading;
                if (check_query_info_contains_cancel_only)
                {
                    /// After the query has started, only `cancel` may be set;
                    /// anything else in an extra QueryInfo is ignored with a warning.
                    if (!nqi.query().empty() || !nqi.query_id().empty() || !nqi.settings().empty() || !nqi.database().empty()
                        || !nqi.input_data().empty() || !nqi.input_data_delimiter().empty() || !nqi.output_format().empty()
                        || !nqi.user_name().empty() || !nqi.password().empty() || !nqi.quota().empty() || !nqi.session_id().empty())
                    {
                        LOG_WARNING(log, "Cannot add extra information to a query which is already executing. Only the 'cancel' field can be set");
                    }
                }
                if (nqi.cancel())
                    want_to_cancel = true;
            }
            else
            {
                /// We cannot throw an exception right here because this code is executed
                /// on queue_thread.
                failed_to_read_query_info = true;
            }
            reading_query_info.set(false);
        });
    };

    auto finish_reading = [&]
    {
        if (reading_query_info.get())
        {
            /// Time spent waiting here is attributed to the client writing.
            Stopwatch client_writing_watch;
            reading_query_info.wait(false);
            waited_for_client_writing += client_writing_watch.elapsedNanoseconds();
        }
        throwIfFailedToReadQueryInfo();
        query_info = std::move(next_query_info_while_reading);
        initial_query_info_read = true;
    };

    if (!initial_query_info_read)
    {
        /// Initial query info hasn't been read yet, so we're going to read it now.
        start_reading();
    }

    /// Maybe it's reading a query info right now. Let it finish.
    finish_reading();

    if (isInputStreaming(call_type))
    {
        /// Next query info can contain more input data. Now we start reading a next query info,
        /// so another call of readQueryInfo() in the future will probably be able to take it.
        start_reading();
    }
}
|
2020-11-01 20:43:50 +00:00
|
|
|
|
2020-11-08 10:26:45 +00:00
|
|
|
void Call::throwIfFailedToReadQueryInfo()
|
|
|
|
{
|
|
|
|
if (failed_to_read_query_info)
|
|
|
|
{
|
|
|
|
if (initial_query_info_read)
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::NETWORK_ERROR, "Failed to read extra QueryInfo");
|
2020-11-08 10:26:45 +00:00
|
|
|
else
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::NETWORK_ERROR, "Failed to read initial QueryInfo");
|
2020-11-08 10:26:45 +00:00
|
|
|
}
|
2020-11-01 20:43:50 +00:00
|
|
|
}
|
|
|
|
|
2020-11-02 00:47:43 +00:00
|
|
|
bool Call::isQueryCancelled()
|
|
|
|
{
|
|
|
|
if (cancelled)
|
|
|
|
{
|
|
|
|
result.set_cancelled(true);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (want_to_cancel)
|
|
|
|
{
|
|
|
|
LOG_INFO(log, "Query cancelled");
|
|
|
|
cancelled = true;
|
|
|
|
result.set_cancelled(true);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2022-02-08 13:15:56 +00:00
|
|
|
void Call::addQueryDetailsToResult()
|
|
|
|
{
|
|
|
|
*result.mutable_query_id() = query_context->getClientInfo().current_query_id;
|
|
|
|
*result.mutable_time_zone() = DateLUT::instance().getTimeZone();
|
|
|
|
}
|
|
|
|
|
2022-02-10 13:43:12 +00:00
|
|
|
void Call::addOutputFormatToResult()
|
|
|
|
{
|
|
|
|
*result.mutable_output_format() = output_format;
|
|
|
|
}
|
|
|
|
|
2022-02-08 13:15:56 +00:00
|
|
|
void Call::addOutputColumnsNamesAndTypesToResult(const Block & header)
|
|
|
|
{
|
|
|
|
if (!send_output_columns_names_and_types)
|
|
|
|
return;
|
|
|
|
for (const auto & column : header)
|
|
|
|
{
|
|
|
|
auto & name_and_type = *result.add_output_columns();
|
|
|
|
*name_and_type.mutable_name() = column.name;
|
|
|
|
*name_and_type.mutable_type() = column.type->getName();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-21 14:35:38 +00:00
|
|
|
/// Drains the accumulated progress counters and adds them to the result message.
void Call::addProgressToResult()
{
    /// Atomically reads and resets the counters, so each increment is reported once.
    auto values = progress.fetchValuesAndResetPiecewiseAtomically();
    if (!values.read_rows && !values.read_bytes && !values.total_rows_to_read && !values.written_rows && !values.written_bytes)
        return;
    auto & grpc_progress = *result.mutable_progress();
    /// Sum is used because we need to accumulate values for the case if streaming output is disabled.
    grpc_progress.set_read_rows(grpc_progress.read_rows() + values.read_rows);
    grpc_progress.set_read_bytes(grpc_progress.read_bytes() + values.read_bytes);
    grpc_progress.set_total_rows_to_read(grpc_progress.total_rows_to_read() + values.total_rows_to_read);
    grpc_progress.set_written_rows(grpc_progress.written_rows() + values.written_rows);
    grpc_progress.set_written_bytes(grpc_progress.written_bytes() + values.written_bytes);
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-10-21 14:35:38 +00:00
|
|
|
/// Serializes the `totals` block in the client's output format (optionally
/// compressed) into `result.totals`.
/// NOTE(review): this is nearly identical to addExtremesToResult(); a shared
/// private helper would need a header change, so the duplication stays for now.
void Call::addTotalsToResult(const Block & totals)
{
    if (!totals)
        return;

    PODArray<char> memory;
    if (output_compression_method != CompressionMethod::None)
        memory.resize(DBMS_DEFAULT_BUFFER_SIZE); /// Must have enough space for compressed data.
    std::unique_ptr<WriteBuffer> buf = std::make_unique<WriteBufferFromVector<PODArray<char>>>(memory);
    buf = wrapWriteBufferWithCompressionMethod(std::move(buf), output_compression_method, output_compression_level);
    auto format = query_context->getOutputFormat(output_format, *buf, totals);
    format->write(materializeBlock(totals));
    format->finalize();
    buf->finalize();

    result.mutable_totals()->assign(memory.data(), memory.size());
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2020-10-21 14:35:38 +00:00
|
|
|
/// Serializes the `extremes` block in the client's output format (optionally
/// compressed) into `result.extremes`. Mirrors addTotalsToResult().
void Call::addExtremesToResult(const Block & extremes)
{
    if (!extremes)
        return;

    PODArray<char> memory;
    if (output_compression_method != CompressionMethod::None)
        memory.resize(DBMS_DEFAULT_BUFFER_SIZE); /// Must have enough space for compressed data.
    std::unique_ptr<WriteBuffer> buf = std::make_unique<WriteBufferFromVector<PODArray<char>>>(memory);
    buf = wrapWriteBufferWithCompressionMethod(std::move(buf), output_compression_method, output_compression_level);
    auto format = query_context->getOutputFormat(output_format, *buf, extremes);
    format->write(materializeBlock(extremes));
    format->finalize();
    buf->finalize();

    result.mutable_extremes()->assign(memory.data(), memory.size());
}
|
2020-10-11 02:19:01 +00:00
|
|
|
|
2021-10-15 20:18:20 +00:00
|
|
|
/// Copies the pipeline's profile counters into the `stats` field of the result.
void Call::addProfileInfoToResult(const ProfileInfo & info)
{
    auto & stats = *result.mutable_stats();
    stats.set_rows(info.rows);
    stats.set_blocks(info.blocks);
    stats.set_allocated_bytes(info.bytes);
    stats.set_applied_limit(info.hasAppliedLimit());
    stats.set_rows_before_limit(info.getRowsBeforeLimit());
}
|
|
|
|
|
2020-10-24 00:37:57 +00:00
|
|
|
/// Drains the server-side text-log queue and appends each log line as a LogEntry
/// to the result message. The column order/types below are the fixed layout of
/// blocks produced by InternalTextLogsQueue (note: column 2 is skipped here).
void Call::addLogsToResult()
{
    if (!logs_queue)
        return;

    /// The gRPC LogsLevel enum must stay numerically identical to Poco priorities,
    /// because values are copied below with a plain static_cast.
    static_assert(::clickhouse::grpc::LOG_NONE == 0);
    static_assert(::clickhouse::grpc::LOG_FATAL == static_cast<int>(Poco::Message::PRIO_FATAL));
    static_assert(::clickhouse::grpc::LOG_CRITICAL == static_cast<int>(Poco::Message::PRIO_CRITICAL));
    static_assert(::clickhouse::grpc::LOG_ERROR == static_cast<int>(Poco::Message::PRIO_ERROR));
    static_assert(::clickhouse::grpc::LOG_WARNING == static_cast<int>(Poco::Message::PRIO_WARNING));
    static_assert(::clickhouse::grpc::LOG_NOTICE == static_cast<int>(Poco::Message::PRIO_NOTICE));
    static_assert(::clickhouse::grpc::LOG_INFORMATION == static_cast<int>(Poco::Message::PRIO_INFORMATION));
    static_assert(::clickhouse::grpc::LOG_DEBUG == static_cast<int>(Poco::Message::PRIO_DEBUG));
    static_assert(::clickhouse::grpc::LOG_TRACE == static_cast<int>(Poco::Message::PRIO_TRACE));

    MutableColumns columns;
    while (logs_queue->tryPop(columns))
    {
        if (columns.empty() || columns[0]->empty())
            continue;

        const auto & column_time = typeid_cast<const ColumnUInt32 &>(*columns[0]);
        const auto & column_time_microseconds = typeid_cast<const ColumnUInt32 &>(*columns[1]);
        const auto & column_query_id = typeid_cast<const ColumnString &>(*columns[3]);
        const auto & column_thread_id = typeid_cast<const ColumnUInt64 &>(*columns[4]);
        const auto & column_level = typeid_cast<const ColumnInt8 &>(*columns[5]);
        const auto & column_source = typeid_cast<const ColumnString &>(*columns[6]);
        const auto & column_text = typeid_cast<const ColumnString &>(*columns[7]);
        size_t num_rows = column_time.size();

        for (size_t row = 0; row != num_rows; ++row)
        {
            auto & log_entry = *result.add_logs();
            log_entry.set_time(column_time.getElement(row));
            log_entry.set_time_microseconds(column_time_microseconds.getElement(row));
            std::string_view query_id = column_query_id.getDataAt(row).toView();
            log_entry.set_query_id(query_id.data(), query_id.size());
            log_entry.set_thread_id(column_thread_id.getElement(row));
            log_entry.set_level(static_cast<::clickhouse::grpc::LogsLevel>(column_level.getElement(row)));
            std::string_view source = column_source.getDataAt(row).toView();
            log_entry.set_source(source.data(), source.size());
            std::string_view text = column_text.getDataAt(row).toView();
            log_entry.set_text(text.data(), text.size());
        }
    }
}
|
|
|
|
|
2020-10-21 14:35:38 +00:00
|
|
|
/// Sends the accumulated `result` message to the client (intermediate or final).
/// Handles: copying/compressing buffered output into the message, skipping empty
/// intermediate messages, serializing writes (gRPC allows only one write in
/// flight), and waiting for the final write to complete.
void Call::sendResult()
{
    /// gRPC doesn't allow to write anything to a finished responder.
    if (responder_finished)
        return;

    /// If output is not streaming then only the final result can be sent.
    bool send_final_message = finalize || result.has_exception() || result.cancelled();
    if (!send_final_message && !isOutputStreaming(call_type))
        return;

    /// Copy output to `result.output`, with optional compressing.
    if (write_buffer)
    {
        size_t output_size;
        if (send_final_message)
        {
            if (compressing_write_buffer)
                LOG_DEBUG(log, "Compressing final {} bytes", compressing_write_buffer->offset());
            /// finalize() flushes everything (including the compressor's trailer) into `output`.
            write_buffer->finalize();
            output_size = output.size();
        }
        else
        {
            if (compressing_write_buffer && compressing_write_buffer->offset())
            {
                LOG_DEBUG(log, "Compressing {} bytes", compressing_write_buffer->offset());
                /// sync() flushes the compressor without finalizing the stream.
                compressing_write_buffer->sync();
            }
            output_size = nested_write_buffer->position() - output.data();
        }

        if (output_size)
        {
            result.mutable_output()->assign(output.data(), output_size);
            nested_write_buffer->restart(); /// We're going to reuse the same buffer again for next block of data.
        }
    }

    if (!send_final_message && result.output().empty() && result.totals().empty() && result.extremes().empty() && !result.logs_size()
        && !result.has_progress() && !result.has_stats() && !result.has_exception() && !result.cancelled())
        return; /// Nothing to send.

    /// Wait for previous write to finish.
    /// (gRPC doesn't allow to start sending another result while the previous is still being sending.)
    if (sending_result.get())
    {
        Stopwatch client_reading_watch;
        sending_result.wait(false);
        waited_for_client_reading += client_reading_watch.elapsedNanoseconds();
    }
    throwIfFailedToSendResult();

    /// Start sending the result.
    LOG_DEBUG(log, "Sending {} result to the client: {}", (send_final_message ? "final" : "intermediate"), getResultDescription(result));

    sending_result.set(true);
    auto callback = [this](bool ok)
    {
        /// Called on queue_thread.
        if (!ok)
            failed_to_send_result = true;
        sending_result.set(false);
    };

    Stopwatch client_reading_final_watch;
    if (send_final_message)
    {
        /// After writeAndFinish() no further writes are possible.
        responder_finished = true;
        responder->writeAndFinish(result, {}, callback);
    }
    else
        responder->write(result, callback);

    /// gRPC has already retrieved all data from `result`, so we don't have to keep it.
    result.Clear();

    if (send_final_message)
    {
        /// Wait until the result is actually sent.
        sending_result.wait(false);
        waited_for_client_reading += client_reading_final_watch.elapsedNanoseconds();
        throwIfFailedToSendResult();
        LOG_TRACE(log, "Final result has been sent to the client");
    }
}
|
|
|
|
|
|
|
|
void Call::throwIfFailedToSendResult()
|
|
|
|
{
|
|
|
|
if (failed_to_send_result)
|
2023-01-23 21:13:58 +00:00
|
|
|
throw Exception(ErrorCodes::NETWORK_ERROR, "Failed to send result to the client");
|
2020-10-15 00:45:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Packs `exception` into the result message and sends it to the client.
/// Setting the exception makes sendResult() treat this as the final message.
void Call::sendException(const Exception & exception)
{
    auto & grpc_exception = *result.mutable_exception();
    grpc_exception.set_code(exception.code());
    grpc_exception.set_name(exception.name());
    grpc_exception.set_display_text(exception.displayText());
    /// The stack trace is only included if the client asked for it.
    if (send_exception_with_stacktrace)
        grpc_exception.set_stack_trace(exception.getStackTraceString());
    sendResult();
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Drives the gRPC completion queue on a dedicated thread and owns all Call objects.
/// Thread model: `mutex` protects `responders_for_new_calls`, `current_calls`,
/// `finished_calls` and `should_stop`; completion callbacks run on queue_thread.
class GRPCServer::Runner
{
public:
    explicit Runner(GRPCServer & owner_) : owner(owner_) {}

    ~Runner()
    {
        if (queue_thread.joinable())
            queue_thread.join();
    }

    /// Creates responders for incoming connections and starts the queue thread.
    void start()
    {
        startReceivingNewCalls();

        /// We run queue in a separate thread.
        auto runner_function = [this]
        {
            try
            {
                run();
            }
            catch (...)
            {
                tryLogCurrentException("GRPCServer");
            }
        };
        queue_thread = ThreadFromGlobalPool{runner_function};
    }

    /// Stops accepting new calls; already-running calls are allowed to finish.
    void stop() { stopReceivingNewCalls(); }

    size_t getNumCurrentCalls() const
    {
        std::lock_guard lock{mutex};
        return current_calls.size();
    }

private:
    /// Prepares one waiting responder per call type.
    void startReceivingNewCalls()
    {
        std::lock_guard lock{mutex};
        responders_for_new_calls.resize(CALL_MAX);
        for (CallType call_type : collections::range(CALL_MAX))
            makeResponderForNewCall(call_type);
    }

    void makeResponderForNewCall(CallType call_type)
    {
        /// `mutex` is already locked.
        responders_for_new_calls[call_type] = makeResponder(call_type);

        responders_for_new_calls[call_type]->start(
            owner.grpc_service, *owner.queue, *owner.queue,
            [this, call_type](bool ok) { onNewCall(call_type, ok); });
    }

    void stopReceivingNewCalls()
    {
        std::lock_guard lock{mutex};
        should_stop = true;
    }

    /// Completion callback for a responder waiting for a new connection.
    void onNewCall(CallType call_type, bool responder_started_ok)
    {
        std::lock_guard lock{mutex};
        auto responder = std::move(responders_for_new_calls[call_type]);
        /// After stop() the moved-out responder is simply dropped and not replaced.
        if (should_stop)
            return;
        makeResponderForNewCall(call_type);
        if (responder_started_ok)
        {
            /// Connection established and the responder has been started.
            /// So we pass this responder to a Call and make another responder for next connection.
            auto new_call = std::make_unique<Call>(call_type, std::move(responder), owner.iserver, owner.log);
            auto * new_call_ptr = new_call.get();
            current_calls[new_call_ptr] = std::move(new_call);
            new_call_ptr->start([this, new_call_ptr]() { onFinishCall(new_call_ptr); });
        }
    }

    void onFinishCall(Call * call)
    {
        /// Called on call_thread. That's why we can't destroy the `call` right now
        /// (thread can't join to itself). Thus here we only move the `call` from
        /// `current_calls` to `finished_calls` and run() will actually destroy the `call`.
        std::lock_guard lock{mutex};
        auto it = current_calls.find(call);
        finished_calls.push_back(std::move(it->second));
        current_calls.erase(it);
    }

    /// Main loop of queue_thread: dispatches completion-queue events to their callbacks.
    void run()
    {
        setThreadName("GRPCServerQueue");
        while (true)
        {
            {
                std::lock_guard lock{mutex};
                finished_calls.clear(); /// Destroy finished calls.

                /// If (should_stop == true) we continue processing until there is no active calls.
                if (should_stop && current_calls.empty())
                {
                    bool all_responders_gone = std::all_of(
                        responders_for_new_calls.begin(), responders_for_new_calls.end(),
                        [](std::unique_ptr<BaseResponder> & responder) { return !responder; });
                    if (all_responders_gone)
                        break;
                }
            }

            bool ok = false;
            void * tag = nullptr;
            if (!owner.queue->Next(&tag, &ok))
            {
                /// Queue shutted down.
                break;
            }

            /// Each tag enqueued by a responder is a CompletionCallback to invoke.
            auto & callback = *static_cast<CompletionCallback *>(tag);
            callback(ok);
        }
    }

    GRPCServer & owner;
    ThreadFromGlobalPool queue_thread;
    std::vector<std::unique_ptr<BaseResponder>> responders_for_new_calls;
    std::map<Call *, std::unique_ptr<Call>> current_calls;
    std::vector<std::unique_ptr<Call>> finished_calls;
    bool should_stop = false;
    mutable std::mutex mutex;
};
|
|
|
|
|
|
|
|
|
|
|
|
/// The Runner is created right in the constructor (not lazily in start()):
/// otherwise AsynchronousMetrics could call currentConnections() before start()
/// and dereference a null runner (historical SIGSEGV, see commit history).
GRPCServer::GRPCServer(IServer & iserver_, const Poco::Net::SocketAddress & address_to_listen_)
    : iserver(iserver_)
    , address_to_listen(address_to_listen_)
    , log(&Poco::Logger::get("GRPCServer"))
    , runner(std::make_unique<Runner>(*this))
{}
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
/// Shutdown order matters: server -> completion queue -> runner (see comments).
GRPCServer::~GRPCServer()
{
    /// Server should be shutdown before CompletionQueue.
    if (grpc_server)
        grpc_server->Shutdown();

    /// Completion Queue should be shutdown before destroying the runner,
    /// because the runner is now probably executing CompletionQueue::Next() on queue_thread
    /// which is blocked until an event is available or the queue is shutting down.
    if (queue)
        queue->Shutdown();

    runner.reset();
}
|
2020-10-08 00:23:10 +00:00
|
|
|
|
|
|
|
/// Builds and starts the gRPC server from the server configuration,
/// then launches the Runner's completion-queue thread.
void GRPCServer::start()
{
    initGRPCLogging(iserver.config());
    grpc::ServerBuilder builder;
    builder.AddListeningPort(address_to_listen.toString(), makeCredentials(iserver.config()));
    builder.RegisterService(&grpc_service);
    /// -1 means "unlimited" for gRPC message size limits.
    builder.SetMaxSendMessageSize(iserver.config().getInt("grpc.max_send_message_size", -1));
    builder.SetMaxReceiveMessageSize(iserver.config().getInt("grpc.max_receive_message_size", -1));
    auto default_transport_compression = TransportCompression::fromConfiguration(iserver.config());
    builder.SetDefaultCompressionAlgorithm(default_transport_compression.algorithm);
    builder.SetDefaultCompressionLevel(default_transport_compression.level);

    queue = builder.AddCompletionQueue();
    grpc_server = builder.BuildAndStart();
    /// BuildAndStart() returns nullptr on failure (e.g. the port is already taken).
    if (nullptr == grpc_server)
    {
        throw DB::Exception(DB::ErrorCodes::NETWORK_ERROR, "Can't start grpc server, there is a port conflict");
    }

    runner->start();
}
|
|
|
|
|
2020-10-15 00:45:13 +00:00
|
|
|
|
2020-10-11 02:19:01 +00:00
|
|
|
/// Stops accepting new calls; calls already in progress keep running.
void GRPCServer::stop()
{
    /// Stop receiving new calls.
    runner->stop();
}
|
|
|
|
|
2020-10-08 00:23:10 +00:00
|
|
|
/// Returns the number of calls currently being processed (thread-safe:
/// the Runner takes its mutex internally).
size_t GRPCServer::currentConnections() const
{
    return runner->getNumCurrentCalls();
}
|
|
|
|
|
|
|
|
}
|
2020-10-11 02:19:01 +00:00
|
|
|
#endif
|