2017-04-01 09:19:00 +00:00
|
|
|
#include <Interpreters/ProcessList.h>
|
2019-03-22 12:08:30 +00:00
|
|
|
#include <Core/Settings.h>
|
2018-05-17 16:01:41 +00:00
|
|
|
#include <Interpreters/Context.h>
|
2018-10-30 16:31:21 +00:00
|
|
|
#include <Interpreters/DatabaseAndTableWithAlias.h>
|
2018-04-17 17:08:15 +00:00
|
|
|
#include <Parsers/ASTSelectWithUnionQuery.h>
|
|
|
|
#include <Parsers/ASTSelectQuery.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Parsers/ASTKillQueryQuery.h>
|
2018-05-17 16:01:41 +00:00
|
|
|
#include <Common/typeid_cast.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Common/Exception.h>
|
2018-05-29 18:14:31 +00:00
|
|
|
#include <Common/CurrentThread.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <IO/WriteHelpers.h>
|
2019-01-23 14:48:50 +00:00
|
|
|
#include <DataStreams/IBlockInputStream.h>
|
2018-02-01 17:55:08 +00:00
|
|
|
#include <common/logger_useful.h>
|
2018-04-18 20:18:18 +00:00
|
|
|
#include <chrono>
|
|
|
|
|
2017-01-21 04:24:28 +00:00
|
|
|
|
2018-10-08 05:30:03 +00:00
|
|
|
namespace CurrentMetrics
{
    /// Server-wide metric that reflects current memory consumption;
    /// attached to total_memory_tracker in the ProcessList constructor below.
    extern const Metric MemoryTracking;
}
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-01-11 21:46:36 +00:00
|
|
|
namespace ErrorCodes
{
    /// Error codes thrown by ProcessList::insert when concurrency limits are hit
    /// or a duplicate query_id is detected.
    extern const int TOO_MANY_SIMULTANEOUS_QUERIES;
    extern const int QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING;
    extern const int LOGICAL_ERROR;
}
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
|
2018-04-17 17:08:15 +00:00
|
|
|
/// Should we execute the query even if max_concurrent_queries limit is exhausted
|
|
|
|
static bool isUnlimitedQuery(const IAST * ast)
|
|
|
|
{
|
|
|
|
if (!ast)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/// It is KILL QUERY
|
2019-03-11 13:22:51 +00:00
|
|
|
if (ast->as<ASTKillQueryQuery>())
|
2018-04-17 17:08:15 +00:00
|
|
|
return true;
|
|
|
|
|
|
|
|
/// It is SELECT FROM system.processes
|
2018-04-18 21:14:47 +00:00
|
|
|
/// NOTE: This is very rough check.
|
|
|
|
/// False negative: USE system; SELECT * FROM processes;
|
|
|
|
/// False positive: SELECT * FROM system.processes CROSS JOIN (SELECT ...)
|
|
|
|
|
2019-03-11 13:22:51 +00:00
|
|
|
if (const auto * ast_selects = ast->as<ASTSelectWithUnionQuery>())
|
2018-04-17 17:08:15 +00:00
|
|
|
{
|
|
|
|
if (!ast_selects->list_of_selects || ast_selects->list_of_selects->children.empty())
|
|
|
|
return false;
|
|
|
|
|
2019-03-11 13:22:51 +00:00
|
|
|
const auto * ast_select = ast_selects->list_of_selects->children[0]->as<ASTSelectQuery>();
|
2018-04-17 17:08:15 +00:00
|
|
|
if (!ast_select)
|
|
|
|
return false;
|
|
|
|
|
2018-10-30 16:31:21 +00:00
|
|
|
if (auto database_and_table = getDatabaseAndTable(*ast_select, 0))
|
|
|
|
return database_and_table->database == "system" && database_and_table->table == "processes";
|
2018-04-17 17:08:15 +00:00
|
|
|
|
2018-10-30 16:31:21 +00:00
|
|
|
return false;
|
2018-04-17 17:08:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2016-01-11 21:46:36 +00:00
|
|
|
}
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
|
2018-10-08 05:30:03 +00:00
|
|
|
/// @param max_size_ maximum number of simultaneously running queries (0 means unlimited,
///                  see the check in ProcessList::insert).
ProcessList::ProcessList(size_t max_size_)
    : max_size(max_size_)
{
    /// Publish server-wide memory consumption through the MemoryTracking metric.
    total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
}
|
|
|
|
|
|
|
|
|
2018-05-17 16:01:41 +00:00
|
|
|
/// Register a new query in the process list.
/// Returns an RAII entry whose destructor removes the query from the list again.
/// Throws TOO_MANY_SIMULTANEOUS_QUERIES / QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING
/// when limits are exceeded (possibly after waiting, see queue_max_wait_ms and
/// replace_running_query_max_wait_ms below), and LOGICAL_ERROR on an empty query id.
ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * ast, Context & query_context)
{
    EntryPtr res;

    const ClientInfo & client_info = query_context.getClientInfo();
    const Settings & settings = query_context.getSettingsRef();

    if (client_info.current_query_id.empty())
        throw Exception("Query id cannot be empty", ErrorCodes::LOGICAL_ERROR);

    /// KILL QUERY and SELECT FROM system.processes bypass the concurrency limits.
    bool is_unlimited_query = isUnlimitedQuery(ast);

    {
        /// unique_lock (not lock_guard) because have_space.wait_for needs to unlock it.
        std::unique_lock lock(mutex);

        const auto queue_max_wait_ms = settings.queue_max_wait_ms.totalMilliseconds();
        if (!is_unlimited_query && max_size && processes.size() >= max_size)
        {
            if (queue_max_wait_ms)
                LOG_WARNING(&Logger::get("ProcessList"), "Too many simultaneous queries, will wait " << queue_max_wait_ms << " ms.");
            /// Either fail immediately (wait of 0 ms) or wait until a slot frees up or the timeout expires.
            if (!queue_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(queue_max_wait_ms), [&]{ return processes.size() < max_size; }))
                throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);
        }

        /** Why we use current user?
          * Because initial one is passed by client and credentials for it is not verified,
          * and using initial_user for limits will be insecure.
          *
          * Why we use current_query_id?
          * Because we want to allow distributed queries that will run multiple secondary queries on same server,
          * like SELECT count() FROM remote('127.0.0.{1,2}', system.numbers)
          * so they must have different query_ids.
          */
        {
            auto user_process_list = user_to_queries.find(client_info.current_user);

            if (user_process_list != user_to_queries.end())
            {
                /// Per-user concurrency limit (server-wide limit was checked above).
                if (!is_unlimited_query && settings.max_concurrent_queries_for_user
                    && user_process_list->second.queries.size() >= settings.max_concurrent_queries_for_user)
                    throw Exception("Too many simultaneous queries for user " + client_info.current_user
                        + ". Current: " + toString(user_process_list->second.queries.size())
                        + ", maximum: " + settings.max_concurrent_queries_for_user.toString(),
                        ErrorCodes::TOO_MANY_SIMULTANEOUS_QUERIES);

                auto running_query = user_process_list->second.queries.find(client_info.current_query_id);

                if (running_query != user_process_list->second.queries.end())
                {
                    if (!settings.replace_running_query)
                        throw Exception("Query with id = " + client_info.current_query_id + " is already running.",
                            ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING);

                    /// Ask queries to cancel. They will check this flag.
                    running_query->second->is_killed.store(true, std::memory_order_relaxed);

                    const auto replace_running_query_max_wait_ms = settings.replace_running_query_max_wait_ms.totalMilliseconds();
                    /// Wait until the old query actually leaves the list. The predicate re-resolves
                    /// the iterator every wakeup (erase may have invalidated it) and re-raises the
                    /// kill flag in case a new query with the same id appeared meanwhile.
                    if (!replace_running_query_max_wait_ms || !have_space.wait_for(lock, std::chrono::milliseconds(replace_running_query_max_wait_ms),
                        [&]
                        {
                            running_query = user_process_list->second.queries.find(client_info.current_query_id);
                            if (running_query == user_process_list->second.queries.end())
                                return true;
                            running_query->second->is_killed.store(true, std::memory_order_relaxed);
                            return false;
                        }))
                    {
                        throw Exception("Query with id = " + client_info.current_query_id + " is already running and can't be stopped",
                            ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING);
                    }
                }
            }
        }

        /// Check other users running query with our query_id
        for (const auto & user_process_list : user_to_queries)
        {
            if (user_process_list.first == client_info.current_user)
                continue;
            if (auto running_query = user_process_list.second.queries.find(client_info.current_query_id); running_query != user_process_list.second.queries.end())
                throw Exception("Query with id = " + client_info.current_query_id + " is already running by user " + user_process_list.first,
                    ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING);
        }

        auto process_it = processes.emplace(processes.end(),
            query_, client_info, settings.max_memory_usage, settings.memory_tracker_fault_probability, priorities.insert(settings.priority));

        res = std::make_shared<Entry>(*this, process_it);

        process_it->query_context = &query_context;

        if (!client_info.current_query_id.empty())
        {
            /// operator[] creates the per-user list on first query of this user.
            ProcessListForUser & user_process_list = user_to_queries[client_info.current_user];
            user_process_list.queries.emplace(client_info.current_query_id, &res->get());

            process_it->setUserProcessList(&user_process_list);

            /// Limits are only raised (to be more relaxed) or set to something instead of zero,
            /// because settings for different queries will interfere each other:
            /// setting from one query effectively sets values for all other queries.

            /// Track memory usage for all simultaneously running queries.
            /// You should specify this value in configuration for default profile,
            /// not for specific users, sessions or queries,
            /// because this setting is effectively global.
            total_memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage_for_all_queries);
            total_memory_tracker.setDescription("(total)");

            /// Track memory usage for all simultaneously running queries from single user.
            user_process_list.user_memory_tracker.setParent(&total_memory_tracker);
            user_process_list.user_memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage_for_user);
            user_process_list.user_memory_tracker.setDescription("(for user)");

            /// Actualize thread group info
            if (auto thread_group = CurrentThread::getGroup())
            {
                std::lock_guard lock_thread_group(thread_group->mutex);
                thread_group->performance_counters.setParent(&user_process_list.user_performance_counters);
                thread_group->memory_tracker.setParent(&user_process_list.user_memory_tracker);
                thread_group->query = process_it->query;

                /// Set query-level memory trackers
                thread_group->memory_tracker.setOrRaiseHardLimit(process_it->max_memory_usage);

                if (query_context.hasTraceCollector())
                {
                    /// Set up memory profiling
                    thread_group->memory_tracker.setOrRaiseProfilerLimit(settings.memory_profiler_step);
                    thread_group->memory_tracker.setProfilerStep(settings.memory_profiler_step);
                }

                thread_group->memory_tracker.setDescription("(for query)");
                if (process_it->memory_tracker_fault_probability)
                    thread_group->memory_tracker.setFaultProbability(process_it->memory_tracker_fault_probability);

                /// NOTE: Do not set the limit for thread-level memory tracker since it could show unreal values
                /// since allocation and deallocation could happen in different threads

                process_it->thread_group = std::move(thread_group);
            }

            if (!user_process_list.user_throttler)
            {
                if (settings.max_network_bandwidth_for_user)
                    user_process_list.user_throttler = std::make_shared<Throttler>(settings.max_network_bandwidth_for_user, total_network_throttler);
                else if (settings.max_network_bandwidth_for_all_users)
                    user_process_list.user_throttler = total_network_throttler;
            }
        }

        /// Lazily create the global throttler on the first query that needs it.
        if (!total_network_throttler && settings.max_network_bandwidth_for_all_users)
        {
            total_network_throttler = std::make_shared<Throttler>(settings.max_network_bandwidth_for_all_users);
        }
    }

    return res;
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Removes the query from the ProcessList and notifies waiters in ProcessList::insert.
/// Calls std::terminate on internal bookkeeping inconsistencies (they indicate a logical
/// error from which the process list cannot recover).
ProcessListEntry::~ProcessListEntry()
{
    /// Destroy all streams to avoid long lock of ProcessList
    it->releaseQueryStreams();

    std::lock_guard lock(parent.mutex);

    /// Copied by value: `it` is erased below, after which they would dangle.
    String user = it->getClientInfo().current_user;
    String query_id = it->getClientInfo().current_query_id;

    /// Kept only for pointer comparison below; must not be dereferenced after erase.
    const QueryStatus * process_list_element_ptr = &*it;

    /// This removes the memory_tracker of one request.
    parent.processes.erase(it);

    auto user_process_list_it = parent.user_to_queries.find(user);
    if (user_process_list_it == parent.user_to_queries.end())
    {
        LOG_ERROR(&Logger::get("ProcessList"), "Logical error: cannot find user in ProcessList");
        std::terminate();
    }

    ProcessListForUser & user_process_list = user_process_list_it->second;

    bool found = false;

    if (auto running_query = user_process_list.queries.find(query_id); running_query != user_process_list.queries.end())
    {
        /// Compare pointers: the map may hold a different query that reused the same query_id
        /// (replace_running_query), in which case the entry must be left alone.
        if (running_query->second == process_list_element_ptr)
        {
            user_process_list.queries.erase(running_query->first);
            found = true;
        }
    }

    if (!found)
    {
        LOG_ERROR(&Logger::get("ProcessList"), "Logical error: cannot find query by query_id and pointer to ProcessListElement in ProcessListForUser");
        std::terminate();
    }
    /// Wake up inserters waiting for a free slot or for this query_id to finish.
    parent.have_space.notify_all();

    /// If there are no more queries for the user, then we will reset memory tracker and network throttler.
    if (user_process_list.queries.empty())
        user_process_list.resetTrackers();

    /// This removes memory_tracker for all requests. At this time, no other memory_trackers live.
    if (parent.processes.empty())
    {
        /// Reset MemoryTracker, similarly (see above).
        parent.total_memory_tracker.logPeakMemoryUsage();
        parent.total_memory_tracker.reset();
        parent.total_network_throttler.reset();
    }
}
|
|
|
|
|
2015-09-04 20:52:00 +00:00
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// @param query_ full query text (stored for system.processes and logging).
/// @param client_info_ identity of the client that issued the query.
/// @param max_memory_usage_ per-query hard memory limit, applied to the thread group in ProcessList::insert.
/// @param memory_tracker_fault_probability_ probability of injecting an artificial allocation failure (testing aid).
/// @param priority_handle_ handle into QueryPriorities, released when this object dies.
QueryStatus::QueryStatus(
    const String & query_,
    const ClientInfo & client_info_,
    size_t max_memory_usage_,
    double memory_tracker_fault_probability_,
    QueryPriorities::Handle && priority_handle_)
    :
    query(query_),
    client_info(client_info_),
    priority_handle(std::move(priority_handle_)),
    /// Increments CurrentMetrics::Query for the lifetime of this object.
    num_queries_increment{CurrentMetrics::Query},
    max_memory_usage(max_memory_usage_),
    memory_tracker_fault_probability(memory_tracker_fault_probability_)
{
}
|
|
|
|
|
2018-06-09 15:29:08 +00:00
|
|
|
/// Out-of-line defaulted destructor: members' types are incomplete in the header.
QueryStatus::~QueryStatus() = default;
|
2018-02-01 17:55:08 +00:00
|
|
|
|
|
|
|
void QueryStatus::setQueryStreams(const BlockIO & io)
|
2016-11-30 17:31:05 +00:00
|
|
|
{
|
2019-01-02 06:44:36 +00:00
|
|
|
std::lock_guard lock(query_streams_mutex);
|
2017-02-07 10:40:29 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
query_stream_in = io.in;
|
|
|
|
query_stream_out = io.out;
|
2018-04-17 15:16:32 +00:00
|
|
|
query_streams_status = QueryStreamsStatus::Initialized;
|
2017-02-03 16:15:12 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
void QueryStatus::releaseQueryStreams()
|
2017-02-03 16:15:12 +00:00
|
|
|
{
|
2018-04-17 15:16:32 +00:00
|
|
|
BlockInputStreamPtr in;
|
|
|
|
BlockOutputStreamPtr out;
|
2017-02-03 16:15:12 +00:00
|
|
|
|
2018-04-17 15:16:32 +00:00
|
|
|
{
|
2019-01-02 06:44:36 +00:00
|
|
|
std::lock_guard lock(query_streams_mutex);
|
2018-04-17 15:16:32 +00:00
|
|
|
|
|
|
|
query_streams_status = QueryStreamsStatus::Released;
|
|
|
|
in = std::move(query_stream_in);
|
|
|
|
out = std::move(query_stream_out);
|
|
|
|
}
|
2017-02-03 16:15:12 +00:00
|
|
|
|
2018-04-17 15:16:32 +00:00
|
|
|
/// Destroy streams outside the mutex lock
|
2017-02-03 16:15:12 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
bool QueryStatus::streamsAreReleased()
|
2017-02-03 16:15:12 +00:00
|
|
|
{
|
2019-01-02 06:44:36 +00:00
|
|
|
std::lock_guard lock(query_streams_mutex);
|
2017-02-03 16:15:12 +00:00
|
|
|
|
2018-04-17 15:16:32 +00:00
|
|
|
return query_streams_status == QueryStreamsStatus::Released;
|
2016-11-30 17:31:05 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Copy the query's streams into `in`/`out` if they have been initialized
/// (and not yet released). Returns whether the copy happened.
bool QueryStatus::tryGetQueryStreams(BlockInputStreamPtr & in, BlockOutputStreamPtr & out) const
{
    std::lock_guard lock(query_streams_mutex);

    const bool streams_available = (query_streams_status == QueryStreamsStatus::Initialized);
    if (streams_available)
    {
        in = query_stream_in;
        out = query_stream_out;
    }

    return streams_available;
}
|
|
|
|
|
2019-02-01 01:48:25 +00:00
|
|
|
/// Ask this query to stop. `kill` is forwarded to IBlockInputStream::cancel.
CancellationCode QueryStatus::cancelQuery(bool kill)
{
    /// Streams are destroyed, and ProcessListElement will be deleted from ProcessList soon. We need wait a little bit
    if (streamsAreReleased())
        return CancellationCode::CancelSent;

    BlockInputStreamPtr in;
    BlockOutputStreamPtr out;

    if (!tryGetQueryStreams(in, out))
    {
        /// Query is not even started: just raise the kill flag, executors check it.
        is_killed.store(true);
        return CancellationCode::CancelSent;
    }

    /// Without an input stream there is nothing we can signal.
    if (!in)
        return CancellationCode::CancelCannotBeSent;

    in->cancel(kill);
    return CancellationCode::CancelSent;
}
|
|
|
|
|
2016-11-30 17:31:05 +00:00
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Link this query to its per-user bookkeeping structure (used by getUserNetworkThrottler).
/// Called from ProcessList::insert while the list mutex is held.
void QueryStatus::setUserProcessList(ProcessListForUser * user_process_list_)
{
    user_process_list = user_process_list_;
}
|
|
|
|
|
|
|
|
|
2018-05-14 16:09:00 +00:00
|
|
|
/// Per-user network throttler, or an empty pointer when the query is not
/// associated with a user process list.
ThrottlerPtr QueryStatus::getUserNetworkThrottler()
{
    return user_process_list ? user_process_list->user_throttler : ThrottlerPtr{};
}
|
|
|
|
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Look up a running query by (user, query_id). Returns nullptr when not found.
/// Caller must hold `mutex`.
QueryStatus * ProcessList::tryGetProcessListElement(const String & current_query_id, const String & current_user)
{
    const auto user_it = user_to_queries.find(current_user);
    if (user_it == user_to_queries.end())
        return nullptr;

    const auto & user_queries = user_it->second.queries;
    if (const auto query_it = user_queries.find(current_query_id); query_it != user_queries.end())
        return query_it->second;

    return nullptr;
}
|
|
|
|
|
|
|
|
|
2019-02-01 01:48:25 +00:00
|
|
|
/// Find the query identified by (user, query_id) and ask it to stop.
CancellationCode ProcessList::sendCancelToQuery(const String & current_query_id, const String & current_user, bool kill)
{
    std::lock_guard lock(mutex);

    if (QueryStatus * elem = tryGetProcessListElement(current_query_id, current_user))
        return elem->cancelQuery(kill);

    return CancellationCode::NotFound;
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
|
2019-07-30 23:12:04 +00:00
|
|
|
void ProcessList::killAllQueries()
|
|
|
|
{
|
|
|
|
std::lock_guard lock(mutex);
|
|
|
|
|
|
|
|
for (auto & process : processes)
|
|
|
|
process.cancelQuery(true);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-17 16:01:41 +00:00
|
|
|
/// Build a snapshot of this query's state for system.processes.
/// @param get_thread_list also copy thread ids (takes the thread group mutex).
/// @param get_profile_events also snapshot ProfileEvents counters.
/// @param get_settings also copy the query's Settings (requires a live query_context).
QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
{
    QueryStatusInfo res;

    res.query = query;
    res.client_info = client_info;
    res.elapsed_seconds = watch.elapsedSeconds();
    res.is_cancelled = is_killed.load(std::memory_order_relaxed);
    res.read_rows = progress_in.read_rows;
    res.read_bytes = progress_in.read_bytes;
    res.total_rows = progress_in.total_rows_to_read;

    /// TODO: Use written_rows and written_bytes when real time progress is implemented
    res.written_rows = progress_out.read_rows;
    res.written_bytes = progress_out.read_bytes;

    /// Memory/thread/profile data lives in the thread group; absent until the query actually starts.
    if (thread_group)
    {
        res.memory_usage = thread_group->memory_tracker.get();
        res.peak_memory_usage = thread_group->memory_tracker.getPeak();

        if (get_thread_list)
        {
            std::lock_guard lock(thread_group->mutex);
            res.thread_ids = thread_group->thread_ids;
        }

        if (get_profile_events)
            res.profile_counters = std::make_shared<ProfileEvents::Counters>(thread_group->performance_counters.getPartiallyAtomicSnapshot());
    }

    if (get_settings && query_context)
        res.query_settings = std::make_shared<Settings>(query_context->getSettingsRef());

    return res;
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Snapshot of every running query, in list order; flags are forwarded to QueryStatus::getInfo.
ProcessList::Info ProcessList::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
{
    std::lock_guard lock(mutex);

    Info result;
    result.reserve(processes.size());
    for (const auto & query_status : processes)
        result.emplace_back(query_status.getInfo(get_thread_list, get_profile_events, get_settings));

    return result;
}
|
|
|
|
|
|
|
|
|
2018-06-09 15:29:08 +00:00
|
|
|
/// Out-of-line defaulted constructor: member types are incomplete in the header.
ProcessListForUser::ProcessListForUser() = default;
|
2018-02-01 17:55:08 +00:00
|
|
|
|
|
|
|
|
2020-03-20 17:27:17 +00:00
|
|
|
/// Snapshot of per-user resource usage (for system.user_processes-style reporting).
ProcessListForUserInfo ProcessListForUser::getInfo(bool get_profile_events) const
{
    ProcessListForUserInfo info;

    info.memory_usage = user_memory_tracker.get();
    info.peak_memory_usage = user_memory_tracker.getPeak();

    if (get_profile_events)
        info.profile_counters = std::make_shared<ProfileEvents::Counters>(user_performance_counters.getPartiallyAtomicSnapshot());

    return info;
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Per-user resource snapshots keyed by user name.
ProcessList::UserInfo ProcessList::getUserInfo(bool get_profile_events) const
{
    std::lock_guard lock(mutex);

    UserInfo result;
    result.reserve(user_to_queries.size());

    for (const auto & [user_name, user_queries] : user_to_queries)
        result.emplace(user_name, user_queries.getInfo(get_profile_events));

    return result;
}
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
}
|