2017-04-01 09:19:00 +00:00
|
|
|
#include <Interpreters/ProcessList.h>
|
|
|
|
#include <Interpreters/Settings.h>
|
|
|
|
#include <Parsers/ASTKillQueryQuery.h>
|
|
|
|
#include <Common/Exception.h>
|
|
|
|
#include <IO/WriteHelpers.h>
|
|
|
|
#include <DataStreams/IProfilingBlockInputStream.h>
|
2017-07-13 20:58:19 +00:00
|
|
|
#include <Common/typeid_cast.h>
|
2018-02-01 17:55:08 +00:00
|
|
|
#include <common/logger_useful.h>
|
|
|
|
#include <pthread.h>
|
2017-01-21 04:24:28 +00:00
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-01-11 21:46:36 +00:00
|
|
|
namespace ErrorCodes
{
    extern const int TOO_MUCH_SIMULTANEOUS_QUERIES;
    extern const int QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING;
    /// Used by ProcessList::insert when `current_thread` is unexpectedly uninitialized;
    /// was referenced below but missing from this declaration block.
    extern const int LOGICAL_ERROR;
}
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
|
|
|
|
/// Register a new running query and return an RAII entry that removes it on destruction.
/// Enforces the global and per-user limits on simultaneous queries, optionally replacing
/// a running query with the same query_id, and wires up per-query / per-user / per-thread
/// memory accounting. Throws TOO_MUCH_SIMULTANEOUS_QUERIES or
/// QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING when admission fails.
ProcessList::EntryPtr ProcessList::insert(
    const String & query_, const IAST * ast, const ClientInfo & client_info, const Settings & settings)
{
    EntryPtr res;

    /// KILL QUERY is exempt from the admission limits below, otherwise a saturated
    /// server could never be relieved.
    bool is_kill_query = ast && typeid_cast<const ASTKillQueryQuery *>(ast);

    {
        std::lock_guard<std::mutex> lock(mutex);

        /// Global limit on simultaneous queries. If the list is full, optionally wait up to
        /// queue_max_wait_ms for a slot; `have_space` is signalled when a query finishes.
        if (!is_kill_query && max_size && processes.size() >= max_size
            && (!settings.queue_max_wait_ms.totalMilliseconds() || !have_space.tryWait(mutex, settings.queue_max_wait_ms.totalMilliseconds())))
            throw Exception("Too many simultaneous queries. Maximum: " + toString(max_size), ErrorCodes::TOO_MUCH_SIMULTANEOUS_QUERIES);

        /** Why we use current user?
          * Because initial one is passed by client and credentials for it is not verified,
          * and using initial_user for limits will be insecure.
          *
          * Why we use current_query_id?
          * Because we want to allow distributed queries that will run multiple secondary queries on same server,
          * like SELECT count() FROM remote('127.0.0.{1,2}', system.numbers)
          * so they must have different query_ids.
          */
        {
            auto user_process_list = user_to_queries.find(client_info.current_user);

            if (user_process_list != user_to_queries.end())
            {
                /// Per-user limit on simultaneous queries.
                if (!is_kill_query && settings.max_concurrent_queries_for_user
                    && user_process_list->second.queries.size() >= settings.max_concurrent_queries_for_user)
                    throw Exception("Too many simultaneous queries for user " + client_info.current_user
                        + ". Current: " + toString(user_process_list->second.queries.size())
                        + ", maximum: " + settings.max_concurrent_queries_for_user.toString(),
                        ErrorCodes::TOO_MUCH_SIMULTANEOUS_QUERIES);

                if (!client_info.current_query_id.empty())
                {
                    auto element = user_process_list->second.queries.find(client_info.current_query_id);
                    if (element != user_process_list->second.queries.end())
                    {
                        if (!settings.replace_running_query)
                            throw Exception("Query with id = " + client_info.current_query_id + " is already running.",
                                ErrorCodes::QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING);

                        /// Kill query could be replaced since system.processes is continuously updated
                        element->second->is_cancelled = true;
                        /// If the request is canceled, the data about it is deleted from the map at the time of cancellation.
                        user_process_list->second.queries.erase(element);
                    }
                }
            }
        }

        auto process_it = processes.emplace(processes.end(),
            query_, client_info,
            settings.limits.max_memory_usage, settings.memory_tracker_fault_probability,
            priorities.insert(settings.priority));

        res = std::make_shared<Entry>(*this, process_it);

        if (!client_info.current_query_id.empty())
        {
            ProcessListForUser & user_process_list = user_to_queries[client_info.current_user];
            user_process_list.queries[client_info.current_query_id] = &*process_it;

            /// Limits are only raised (to be more relaxed) or set to something instead of zero,
            /// because settings for different queries will interfere each other:
            /// setting from one query effectively sets values for all other queries.

            /// Track memory usage for all simultaneously running queries.
            /// You should specify this value in configuration for default profile,
            /// not for specific users, sessions or queries,
            /// because this setting is effectively global.
            total_memory_tracker.setOrRaiseLimit(settings.limits.max_memory_usage_for_all_queries);
            total_memory_tracker.setDescription("(total)");

            /// Track memory usage for all simultaneously running queries from single user.
            user_process_list.user_memory_tracker.setParent(&total_memory_tracker);
            user_process_list.user_memory_tracker.setOrRaiseLimit(settings.limits.max_memory_usage_for_user);
            user_process_list.user_memory_tracker.setDescription("(for user)");

            /// Query-level memory tracker is already set in the QueryStatus constructor

            if (!current_thread)
                throw Exception("Thread is not initialized", ErrorCodes::LOGICAL_ERROR);

            /// current_thread is guaranteed non-null after the check above, so attach the
            /// thread to the new query and apply the per-thread limit unconditionally.
            /// (Previously this was wrapped in a redundant `if (current_thread)`.)
            current_thread->setCurrentThreadParentQuery(&*process_it);
            current_thread->memory_tracker.setOrRaiseLimit(settings.limits.max_memory_usage);
            current_thread->memory_tracker.setDescription("(for thread)");

            /// The throttler is shared by all of this user's queries and is created lazily.
            if (settings.limits.max_network_bandwidth_for_user && !user_process_list.user_throttler)
            {
                user_process_list.user_throttler = std::make_shared<Throttler>(settings.limits.max_network_bandwidth_for_user, 0,
                    "Network bandwidth limit for a user exceeded.");
            }

            process_it->setUserProcessList(&user_process_list);
        }
    }

    return res;
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Deregister the query: release its streams, finalize its threads, unlink it from the
/// per-user map, signal a waiting query that a slot freed up, and reset the global
/// memory tracker when the list becomes empty.
ProcessListEntry::~ProcessListEntry()
{
    /// Destroy all streams to avoid long lock of ProcessList
    it->releaseQueryStreams();

    /// Finalize all threads statuses
    {
        current_thread->onExit();

        std::lock_guard lock(it->threads_mutex);

        for (auto & elem : it->thread_statuses)
        {
            auto & thread_status = elem.second;
            thread_status->reset();
        }
    }

    std::lock_guard<std::mutex> lock(parent.mutex);

    /// The order of removing memory_trackers is important.

    /// Copy out what we need before erasing the element `it` points into.
    String user = it->client_info.current_user;
    String query_id = it->client_info.current_query_id;
    bool is_cancelled = it->is_cancelled;

    /// This removes the memory_tracker of one request.
    parent.processes.erase(it);

    auto user_process_list = parent.user_to_queries.find(user);
    if (user_process_list != parent.user_to_queries.end())
    {
        /// In case the request is canceled, the data about it is deleted from the map at the time of cancellation, and not here.
        if (!is_cancelled && !query_id.empty())
        {
            auto element = user_process_list->second.queries.find(query_id);
            if (element != user_process_list->second.queries.end())
                user_process_list->second.queries.erase(element);
        }

        /// This removes the memory_tracker from the user. At this time, the memory_tracker that references it does not live.

        /// If there are no more queries for the user, then we delete the entry.
        /// This also clears the MemoryTracker for the user, and a message about the memory consumption is output to the log.
        /// This also clears network bandwidth Throttler, so it will not count periods of inactivity.
        /// Sometimes it is important to reset the MemoryTracker, because it may accumulate skew
        /// due to the fact that there are cases when memory can be allocated while processing the request, but released later.
        if (user_process_list->second.queries.empty())
            parent.user_to_queries.erase(user_process_list);
    }

    /// Wake up one query that may be waiting in ProcessList::insert for a free slot.
    parent.have_space.signal();

    /// This removes memory_tracker for all requests. At this time, no other memory_trackers live.
    if (parent.processes.empty())
    {
        /// Reset MemoryTracker, similarly (see above).
        parent.total_memory_tracker.logPeakMemoryUsage();
        parent.total_memory_tracker.reset();
    }
}
|
|
|
|
|
2015-09-04 20:52:00 +00:00
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Construct the per-query status entry.
/// @param query_                           full query text (stored for system.processes).
/// @param client_info_                     identity/origin of the client that issued the query.
/// @param max_memory_usage                 per-query memory limit (0 — presumably unlimited; confirm against MemoryTracker).
/// @param memory_tracker_fault_probability if non-zero, the memory tracker injects artificial
///                                         allocation failures with this probability (for testing).
/// @param priority_handle_                 handle into QueryPriorities; moved in, released on destruction.
QueryStatus::QueryStatus(
    const String & query_,
    const ClientInfo & client_info_,
    size_t max_memory_usage,
    double memory_tracker_fault_probability,
    QueryPriorities::Handle && priority_handle_)
    :
    query(query_),
    client_info(client_info_),
    priority_handle(std::move(priority_handle_)),
    performance_counters(ProfileEvents::Level::Process),
    num_queries_increment{CurrentMetrics::Query}
{
    /// The limit is only raised, never lowered (see the comment in ProcessList::insert).
    memory_tracker.setOrRaiseLimit(max_memory_usage);
    memory_tracker.setDescription("(for query)");

    if (memory_tracker_fault_probability)
        memory_tracker.setFaultProbability(memory_tracker_fault_probability);
}
|
|
|
|
|
|
|
|
|
|
|
|
void QueryStatus::setQueryStreams(const BlockIO & io)
|
2016-11-30 17:31:05 +00:00
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
std::lock_guard<std::mutex> lock(query_streams_mutex);
|
2017-02-07 10:40:29 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
query_stream_in = io.in;
|
|
|
|
query_stream_out = io.out;
|
|
|
|
query_streams_initialized = true;
|
2017-02-03 16:15:12 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
void QueryStatus::releaseQueryStreams()
|
2017-02-03 16:15:12 +00:00
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
std::lock_guard<std::mutex> lock(query_streams_mutex);
|
2017-02-03 16:15:12 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
query_streams_initialized = false;
|
|
|
|
query_streams_released = true;
|
|
|
|
query_stream_in.reset();
|
|
|
|
query_stream_out.reset();
|
2017-02-03 16:15:12 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
bool QueryStatus::streamsAreReleased()
|
2017-02-03 16:15:12 +00:00
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
std::lock_guard<std::mutex> lock(query_streams_mutex);
|
2017-02-03 16:15:12 +00:00
|
|
|
|
2017-04-01 07:20:54 +00:00
|
|
|
return query_streams_released;
|
2016-11-30 17:31:05 +00:00
|
|
|
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Copy the query's streams into `in`/`out` if they have been set via setQueryStreams.
/// Returns false (leaving the outputs untouched) when the streams are not initialized.
/// Thread-safe.
bool QueryStatus::tryGetQueryStreams(BlockInputStreamPtr & in, BlockOutputStreamPtr & out) const
{
    std::lock_guard<std::mutex> streams_guard(query_streams_mutex);

    const bool available = query_streams_initialized;
    if (available)
    {
        in = query_stream_in;
        out = query_stream_out;
    }
    return available;
}
|
|
|
|
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Attach this query to its user's entry, chaining the per-query performance
/// counters and memory tracker into the per-user aggregates.
/// NOTE(review): `user_process_list_` is dereferenced unconditionally — callers must pass non-null.
void QueryStatus::setUserProcessList(ProcessListForUser * user_process_list_)
{
    user_process_list = user_process_list_;

    ProcessListForUser & per_user = *user_process_list;
    performance_counters.parent = &per_user.user_performance_counters;
    memory_tracker.setParent(&per_user.user_memory_tracker);
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Register a temporary table under the given query's status entry,
/// replacing any previous table with the same name. Thread-safe.
void ProcessList::addTemporaryTable(QueryStatus & elem, const String & table_name, const StoragePtr & storage)
{
    std::lock_guard<std::mutex> list_guard(mutex);
    elem.temporary_tables[table_name] = storage;
}
|
|
|
|
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
/// Find the status entry for (user, query_id), or nullptr if no such query runs.
/// Caller must hold `mutex` — this only reads the maps, it does not lock.
QueryStatus * ProcessList::tryGetProcessListElement(const String & current_query_id, const String & current_user)
{
    /// Queries are indexed per user, so resolve the user's bucket first.
    auto user_it = user_to_queries.find(current_user);
    if (user_it == user_to_queries.end())
        return nullptr;

    const auto & queries_of_user = user_it->second.queries;
    auto query_it = queries_of_user.find(current_query_id);
    return query_it == queries_of_user.end() ? nullptr : query_it->second;
}
|
|
|
|
|
|
|
|
|
2018-03-05 21:09:39 +00:00
|
|
|
/// Ask the query identified by (current_query_id, current_user) to stop.
/// @param kill  passed through to IProfilingBlockInputStream::cancel (hard vs soft cancel).
/// @return NotFound if no such query; CancelSent if the cancel was delivered (or the
///         streams were already released, meaning the query is finishing anyway);
///         CancelCannotBeSent if the input stream is not a profiling stream;
///         QueryIsNotInitializedYet if the streams were never set.
ProcessList::CancellationCode ProcessList::sendCancelToQuery(const String & current_query_id, const String & current_user, bool kill)
{
    std::lock_guard<std::mutex> lock(mutex);

    QueryStatus * elem = tryGetProcessListElement(current_query_id, current_user);

    if (!elem)
        return CancellationCode::NotFound;

    /// Streams are destroyed, and ProcessListElement will be deleted from ProcessList soon. We need wait a little bit
    if (elem->streamsAreReleased())
        return CancellationCode::CancelSent;

    BlockInputStreamPtr input_stream;
    BlockOutputStreamPtr output_stream;

    if (elem->tryGetQueryStreams(input_stream, output_stream))
    {
        /// Cancellation is delivered through the (profiling) input stream. The if-scoped
        /// cast replaces an uninitialized pointer plus assignment-inside-condition.
        if (input_stream)
        {
            if (auto * profiling_stream = dynamic_cast<IProfilingBlockInputStream *>(input_stream.get()))
            {
                profiling_stream->cancel(kill);
                return CancellationCode::CancelSent;
            }
        }
        return CancellationCode::CancelCannotBeSent;
    }

    return CancellationCode::QueryIsNotInitializedYet;
}
|
|
|
|
|
2018-02-01 17:55:08 +00:00
|
|
|
|
|
|
|
/// Per-user bookkeeping entry: its performance counters aggregate into the
/// process-global counters; other members rely on default initialization.
ProcessListForUser::ProcessListForUser()
    : user_performance_counters(ProfileEvents::Level::User, &ProfileEvents::global_counters)
{}
|
|
|
|
|
|
|
|
|
2015-06-21 06:06:04 +00:00
|
|
|
}
|