2018-05-23 20:19:33 +00:00
|
|
|
#pragma once
|
|
|
|
|
|
|
|
#include <memory>
|
|
|
|
#include <Processors/Port.h>
|
2022-02-06 11:44:34 +00:00
|
|
|
#include <Common/Stopwatch.h>
|
2018-05-23 20:19:33 +00:00
|
|
|
|
|
|
|
|
|
|
|
class EventCounter;
|
|
|
|
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
2020-02-25 18:10:48 +00:00
|
|
|
namespace ErrorCodes
{
    /// Declarations only (`extern`): the actual integer values are defined elsewhere in the project.
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
}
|
2018-05-23 20:19:33 +00:00
|
|
|
|
2020-06-25 09:39:17 +00:00
|
|
|
class IQueryPlanStep;
|
|
|
|
|
2022-05-31 14:43:38 +00:00
|
|
|
struct StorageLimits;
|
|
|
|
using StorageLimitsList = std::list<StorageLimits>;
|
|
|
|
|
2019-02-27 11:24:14 +00:00
|
|
|
class IProcessor;
|
|
|
|
using ProcessorPtr = std::shared_ptr<IProcessor>;
|
|
|
|
using Processors = std::vector<ProcessorPtr>;
|
|
|
|
|
2018-05-23 20:19:33 +00:00
|
|
|
/** Processor is an element (low level building block) of a query execution pipeline.
|
|
|
|
* It has zero or more input ports and zero or more output ports.
|
|
|
|
*
|
|
|
|
* Blocks of data are transferred over ports.
|
|
|
|
* Each port has fixed structure: names and types of columns and values of constants.
|
|
|
|
*
|
|
|
|
* Processors may pull data from input ports, do some processing and push data to output ports.
|
|
|
|
* Processor may indicate that it requires input data to proceed and indicate that it needs data from some ports.
|
|
|
|
*
|
|
|
|
* Synchronous work must only use CPU - don't do any sleep, IO wait, network wait.
|
|
|
|
*
|
|
|
|
* Processor may want to do work asynchronously (example: fetch data from remote server)
|
|
|
|
* - in this case it will initiate background job and allow to subscribe to it.
|
|
|
|
*
|
|
|
|
* Processor may throw an exception to indicate some runtime error.
|
|
|
|
*
|
|
|
|
* Different ports may have different structure. For example, ports may correspond to different resultsets
|
|
|
|
* or semantically different parts of result.
|
|
|
|
*
|
2018-05-24 01:02:16 +00:00
|
|
|
* Processor may modify its ports (create another processors and connect to them) on the fly.
|
|
|
|
* Example: first execute the subquery; on basis of subquery result
|
|
|
|
* determine how to execute the rest of query and build the corresponding pipeline.
|
|
|
|
*
|
|
|
|
* Processor may simply wait for another processor to execute without transferring any data from it.
|
|
|
|
* For this purpose it should connect its input port to another processor, and indicate need of data.
|
2018-05-23 20:19:33 +00:00
|
|
|
*
|
|
|
|
* Examples:
|
|
|
|
*
|
|
|
|
* Source. Has no input ports and single output port. Generates data itself and pushes it to its output port.
|
|
|
|
*
|
|
|
|
* Sink. Has single input port and no output ports. Consumes data that was passed to its input port.
|
|
|
|
*
|
|
|
|
* Empty source. Immediately says that data on its output port is finished.
|
|
|
|
*
|
|
|
|
* Null sink. Consumes data and does nothing.
|
|
|
|
*
|
|
|
|
* Simple transformation. Has single input and single output port. Pulls data, transforms it and pushes to output port.
|
|
|
|
* Example: expression calculator.
|
|
|
|
* TODO Better to make each function a separate processor. It's better for pipeline analysis. Also keep in mind 'sleep' and 'rand' functions.
|
|
|
|
*
|
|
|
|
* Squashing or filtering transformation. Pulls data, possibly accumulates it, and sometimes pushes it to output port.
|
|
|
|
* Examples: DISTINCT, WHERE, squashing of blocks for INSERT SELECT.
|
|
|
|
*
|
|
|
|
* Accumulating transformation. Pulls and accumulates all data from input until it it exhausted, then pushes data to output port.
|
|
|
|
* Examples: ORDER BY, GROUP BY.
|
|
|
|
*
|
|
|
|
* Limiting transformation. Pulls data from input and passes to output.
|
|
|
|
* When there was enough data, says that it doesn't need data on its input and that data on its output port is finished.
|
|
|
|
*
|
2020-08-08 00:47:03 +00:00
|
|
|
* Resize. Has arbitrary number of inputs and arbitrary number of outputs.
|
|
|
|
* Pulls data from whatever ready input and pushes it to randomly chosen free output.
|
2018-05-23 20:19:33 +00:00
|
|
|
* Examples:
|
2020-08-08 00:47:03 +00:00
|
|
|
* Union - merge data from number of inputs to one output in arbitrary order.
|
|
|
|
* Split - read data from one input and pass it to arbitrary output.
|
2018-05-23 20:19:33 +00:00
|
|
|
*
|
|
|
|
* Concat. Has many inputs and only one output. Pulls all data from first input until it is exhausted,
|
|
|
|
* then all data from second input, etc. and pushes all data to output.
|
|
|
|
*
|
|
|
|
* Ordered merge. Has many inputs but only one output. Pulls data from selected input in specific order, merges and pushes it to output.
|
|
|
|
*
|
|
|
|
* Fork. Has one input and many outputs. Pulls data from input and copies it to all outputs.
|
|
|
|
* Used to process multiple queries with common source of data.
|
|
|
|
*
|
|
|
|
* Select. Has one or multiple inputs and one output.
|
|
|
|
* Read blocks from inputs and check that blocks on inputs are "parallel": correspond to each other in number of rows.
|
|
|
|
* Construct a new block by selecting some subset (or all) of columns from inputs.
|
2018-05-24 01:02:16 +00:00
|
|
|
* Example: collect columns - function arguments before function execution.
|
2018-05-23 20:19:33 +00:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* TODO Processors may carry algebraic properties about transformations they do.
|
|
|
|
* For example, that processor doesn't change number of rows; doesn't change order of rows, doesn't change the set of rows, etc.
|
|
|
|
*
|
2018-05-24 01:02:16 +00:00
|
|
|
* TODO Ports may carry algebraic properties about streams of data.
|
|
|
|
* For example, that data comes ordered by specific key; or grouped by specific key; or have unique values of specific key.
|
2018-05-24 02:39:22 +00:00
|
|
|
* And also simple properties, including lower and upper bound on number of rows.
|
2018-05-24 01:02:16 +00:00
|
|
|
*
|
2018-05-23 20:19:33 +00:00
|
|
|
* TODO Processor should have declarative representation, that is able to be serialized and parsed.
|
2018-05-24 01:02:16 +00:00
|
|
|
* Example: read_from_merge_tree(database, table, Columns(a, b, c), Piece(0, 10), Parts(Part('name', MarkRanges(MarkRange(0, 100), ...)), ...))
|
2018-05-23 20:19:33 +00:00
|
|
|
* It's reasonable to have an intermediate language for declaration of pipelines.
|
2018-05-24 01:02:16 +00:00
|
|
|
*
|
2018-05-23 20:19:33 +00:00
|
|
|
* TODO Processor with all its parameters should represent "pure" function on streams of data from its input ports.
|
|
|
|
* It's in question, what kind of "pure" function do we mean.
|
|
|
|
* For example, data streams are considered equal up to order unless ordering properties are stated explicitly.
|
2020-08-08 00:47:03 +00:00
|
|
|
* Another example: we should support the notion of "arbitrary N-th of M substream" of full stream of data.
|
2018-05-23 20:19:33 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
class IProcessor
|
|
|
|
{
|
|
|
|
protected:
|
|
|
|
InputPorts inputs;
|
|
|
|
OutputPorts outputs;
|
|
|
|
|
|
|
|
public:
|
2019-02-05 13:01:40 +00:00
|
|
|
IProcessor() = default;
|
2018-05-23 20:19:33 +00:00
|
|
|
|
|
|
|
IProcessor(InputPorts inputs_, OutputPorts outputs_)
|
|
|
|
: inputs(std::move(inputs_)), outputs(std::move(outputs_))
|
|
|
|
{
|
|
|
|
for (auto & port : inputs)
|
|
|
|
port.processor = this;
|
|
|
|
for (auto & port : outputs)
|
|
|
|
port.processor = this;
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual String getName() const = 0;
|
|
|
|
|
|
|
|
enum class Status
|
|
|
|
{
|
|
|
|
/// Processor needs some data at its inputs to proceed.
|
|
|
|
/// You need to run another processor to generate required input and then call 'prepare' again.
|
|
|
|
NeedData,
|
|
|
|
|
2019-02-07 18:51:53 +00:00
|
|
|
/// Processor cannot proceed because output port is full or not isNeeded().
|
2018-05-23 20:19:33 +00:00
|
|
|
/// You need to transfer data from output port to the input port of another processor and then call 'prepare' again.
|
|
|
|
PortFull,
|
|
|
|
|
2019-02-07 18:51:53 +00:00
|
|
|
/// All work is done (all data is processed or all output are closed), nothing more to do.
|
2018-05-23 20:19:33 +00:00
|
|
|
Finished,
|
|
|
|
|
|
|
|
/// No one needs data on output ports.
|
2019-02-07 18:51:53 +00:00
|
|
|
/// Unneeded,
|
2018-05-23 20:19:33 +00:00
|
|
|
|
|
|
|
/// You may call 'work' method and processor will do some work synchronously.
|
|
|
|
Ready,
|
|
|
|
|
2020-12-03 17:03:13 +00:00
|
|
|
/// You may call 'schedule' method and processor will return descriptor.
|
|
|
|
/// You need to poll this descriptor and call work() afterwards.
|
2018-05-23 20:19:33 +00:00
|
|
|
Async,
|
|
|
|
|
2019-02-27 11:24:14 +00:00
|
|
|
/// Processor wants to add other processors to pipeline.
|
|
|
|
/// New processors must be obtained by expandPipeline() call.
|
|
|
|
ExpandPipeline,
|
2018-05-23 20:19:33 +00:00
|
|
|
};
|
|
|
|
|
2019-02-08 16:10:57 +00:00
|
|
|
static std::string statusToName(Status status);
|
|
|
|
|
2020-04-18 09:51:21 +00:00
|
|
|
/** Method 'prepare' is responsible for all cheap ("instantaneous": O(1) of data volume, no wait) calculations.
|
2018-05-23 20:19:33 +00:00
|
|
|
*
|
|
|
|
* It may access input and output ports,
|
|
|
|
* indicate the need for work by another processor by returning NeedData or PortFull,
|
2020-04-18 09:51:21 +00:00
|
|
|
* or indicate the absence of work by returning Finished or Unneeded,
|
2018-05-23 20:19:33 +00:00
|
|
|
* it may pull data from input ports and push data to output ports.
|
|
|
|
*
|
|
|
|
* The method is not thread-safe and must be called from a single thread in one moment of time,
|
|
|
|
* even for different connected processors.
|
|
|
|
*
|
|
|
|
* Instead of all long work (CPU calculations or waiting) it should just prepare all required data and return Ready or Async.
|
|
|
|
*
|
|
|
|
* Thread safety and parallel execution:
|
|
|
|
* - no methods (prepare, work, schedule) of single object can be executed in parallel;
|
|
|
|
* - method 'work' can be executed in parallel for different objects, even for connected processors;
|
|
|
|
* - method 'prepare' cannot be executed in parallel even for different objects,
|
|
|
|
* if they are connected (including indirectly) to each other by their ports;
|
|
|
|
*/
|
2019-11-27 16:24:44 +00:00
|
|
|
virtual Status prepare()
|
|
|
|
{
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'prepare' is not implemented for {} processor", getName());
|
2019-11-27 16:24:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
using PortNumbers = std::vector<UInt64>;
|
|
|
|
|
|
|
|
/// Optimization for prepare in case we know ports were updated.
|
|
|
|
virtual Status prepare(const PortNumbers & /*updated_input_ports*/, const PortNumbers & /*updated_output_ports*/) { return prepare(); }
|
2018-05-23 20:19:33 +00:00
|
|
|
|
|
|
|
/** You may call this method if 'prepare' returned Ready.
|
|
|
|
* This method cannot access any ports. It should use only data that was prepared by 'prepare' method.
|
|
|
|
*
|
|
|
|
* Method work can be executed in parallel for different processors.
|
|
|
|
*/
|
|
|
|
virtual void work()
|
|
|
|
{
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'work' is not implemented for {} processor", getName());
|
2018-05-23 20:19:33 +00:00
|
|
|
}
|
|
|
|
|
2020-12-22 08:42:09 +00:00
|
|
|
/** Executor must call this method when 'prepare' returned Async.
|
2018-05-23 20:19:33 +00:00
|
|
|
* This method cannot access any ports. It should use only data that was prepared by 'prepare' method.
|
|
|
|
*
|
2020-12-22 08:42:09 +00:00
|
|
|
* This method should instantly return epollable file descriptor which will be readable when asynchronous job is done.
|
|
|
|
* When descriptor is readable, method `work` is called to continue data processing.
|
2018-05-23 20:19:33 +00:00
|
|
|
*
|
2020-12-22 08:42:09 +00:00
|
|
|
* NOTE: it would be more logical to let `work()` return ASYNC status instead of prepare. This will get
|
|
|
|
* prepare() -> work() -> schedule() -> work() -> schedule() -> .. -> work() -> prepare()
|
|
|
|
* chain instead of
|
|
|
|
* prepare() -> work() -> prepare() -> schedule() -> work() -> prepare() -> schedule() -> .. -> work() -> prepare()
|
|
|
|
*
|
|
|
|
* It is expected that executor epoll using level-triggered notifications.
|
|
|
|
* Read all available data from descriptor before returning ASYNC.
|
2018-05-23 20:19:33 +00:00
|
|
|
*/
|
2020-12-03 17:03:13 +00:00
|
|
|
virtual int schedule()
|
2018-05-23 20:19:33 +00:00
|
|
|
{
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'schedule' is not implemented for {} processor", getName());
|
2018-05-23 20:19:33 +00:00
|
|
|
}
|
|
|
|
|
2019-02-27 11:24:14 +00:00
|
|
|
/** You must call this method if 'prepare' returned ExpandPipeline.
|
|
|
|
* This method cannot access any port, but it can create new ports for current processor.
|
|
|
|
*
|
|
|
|
* Method should return set of new already connected processors.
|
|
|
|
* All added processors must be connected only to each other or current processor.
|
|
|
|
*
|
|
|
|
* Method can't remove or reconnect existing ports, move data from/to port or perform calculations.
|
|
|
|
* 'prepare' should be called again after expanding pipeline.
|
|
|
|
*/
|
|
|
|
virtual Processors expandPipeline()
|
|
|
|
{
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method 'expandPipeline' is not implemented for {} processor", getName());
|
2019-02-27 11:24:14 +00:00
|
|
|
}
|
|
|
|
|
2019-08-01 14:25:41 +00:00
|
|
|
/// In case if query was cancelled executor will wait till all processors finish their jobs.
|
|
|
|
/// Generally, there is no reason to check this flag. However, it may be reasonable for long operations (e.g. i/o).
|
|
|
|
bool isCancelled() const { return is_cancelled; }
|
2020-01-27 14:31:40 +00:00
|
|
|
void cancel()
|
|
|
|
{
|
|
|
|
is_cancelled = true;
|
2020-01-27 16:03:55 +00:00
|
|
|
onCancel();
|
2020-01-27 14:31:40 +00:00
|
|
|
}
|
2019-08-01 14:25:41 +00:00
|
|
|
|
2020-04-12 15:02:17 +00:00
|
|
|
/// Additional method which is called in case if ports were updated while work() method.
|
|
|
|
/// May be used to stop execution in rare cases.
|
2020-04-12 19:01:02 +00:00
|
|
|
virtual void onUpdatePorts() {}
|
2020-04-12 15:02:17 +00:00
|
|
|
|
2019-02-05 13:01:40 +00:00
|
|
|
virtual ~IProcessor() = default;
|
2018-05-23 20:19:33 +00:00
|
|
|
|
|
|
|
auto & getInputs() { return inputs; }
|
|
|
|
auto & getOutputs() { return outputs; }
|
|
|
|
|
2019-11-27 16:24:44 +00:00
|
|
|
UInt64 getInputPortNumber(const InputPort * input_port) const
|
|
|
|
{
|
|
|
|
UInt64 number = 0;
|
2021-01-22 09:13:22 +00:00
|
|
|
for (const auto & port : inputs)
|
2019-11-27 16:24:44 +00:00
|
|
|
{
|
|
|
|
if (&port == input_port)
|
|
|
|
return number;
|
|
|
|
|
|
|
|
++number;
|
|
|
|
}
|
|
|
|
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't find input port for {} processor", getName());
|
2019-11-27 16:24:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
UInt64 getOutputPortNumber(const OutputPort * output_port) const
|
|
|
|
{
|
|
|
|
UInt64 number = 0;
|
2021-01-22 09:13:22 +00:00
|
|
|
for (const auto & port : outputs)
|
2019-11-27 16:24:44 +00:00
|
|
|
{
|
|
|
|
if (&port == output_port)
|
|
|
|
return number;
|
|
|
|
|
|
|
|
++number;
|
|
|
|
}
|
|
|
|
|
2022-04-24 10:34:50 +00:00
|
|
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't find output port for {} processor", getName());
|
2019-11-27 16:24:44 +00:00
|
|
|
}
|
|
|
|
|
2019-10-20 09:12:42 +00:00
|
|
|
const auto & getInputs() const { return inputs; }
|
|
|
|
const auto & getOutputs() const { return outputs; }
|
|
|
|
|
2018-05-23 20:19:33 +00:00
|
|
|
/// Debug output.
|
|
|
|
void dump() const;
|
2019-05-16 14:57:27 +00:00
|
|
|
|
2019-08-01 14:25:41 +00:00
|
|
|
/// Used to print pipeline.
|
2019-06-19 18:30:02 +00:00
|
|
|
void setDescription(const std::string & description_) { processor_description = description_; }
|
|
|
|
const std::string & getDescription() const { return processor_description; }
|
2019-08-01 14:25:41 +00:00
|
|
|
|
2019-09-02 14:49:05 +00:00
|
|
|
/// Helpers for pipeline executor.
|
2019-09-02 14:55:43 +00:00
|
|
|
void setStream(size_t value) { stream_number = value; }
|
|
|
|
size_t getStream() const { return stream_number; }
|
2019-09-02 14:49:05 +00:00
|
|
|
constexpr static size_t NO_STREAM = std::numeric_limits<size_t>::max();
|
|
|
|
|
2020-06-25 09:39:17 +00:00
|
|
|
/// Step of QueryPlan from which processor was created.
|
|
|
|
void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0)
|
|
|
|
{
|
|
|
|
query_plan_step = step;
|
|
|
|
query_plan_step_group = group;
|
|
|
|
}
|
|
|
|
|
|
|
|
IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; }
|
|
|
|
size_t getQueryPlanStepGroup() const { return query_plan_step_group; }
|
|
|
|
|
2022-02-06 11:44:34 +00:00
|
|
|
uint64_t getElapsedUs() const { return elapsed_us; }
|
2022-02-27 10:52:27 +00:00
|
|
|
uint64_t getInputWaitElapsedUs() const { return input_wait_elapsed_us; }
|
|
|
|
uint64_t getOutputWaitElapsedUs() const { return output_wait_elapsed_us; }
|
2022-02-06 11:44:34 +00:00
|
|
|
|
2022-08-11 11:02:41 +00:00
|
|
|
struct ProcessorDataStats
|
|
|
|
{
|
|
|
|
size_t input_rows = 0;
|
|
|
|
size_t input_bytes = 0;
|
|
|
|
size_t output_rows = 0;
|
|
|
|
size_t output_bytes = 0;
|
|
|
|
};
|
|
|
|
|
|
|
|
ProcessorDataStats getProcessorDataStats() const
|
|
|
|
{
|
|
|
|
ProcessorDataStats stats;
|
|
|
|
|
|
|
|
for (const auto & input : inputs)
|
|
|
|
{
|
|
|
|
stats.input_rows += input.rows;
|
|
|
|
stats.input_bytes += input.bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (const auto & output : outputs)
|
|
|
|
{
|
|
|
|
stats.output_rows += output.rows;
|
|
|
|
stats.output_bytes += output.bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
return stats;
|
|
|
|
}
|
|
|
|
|
2022-05-31 14:43:38 +00:00
|
|
|
struct ReadProgressCounters
|
2022-05-09 10:28:05 +00:00
|
|
|
{
|
|
|
|
uint64_t read_rows = 0;
|
|
|
|
uint64_t read_bytes = 0;
|
2022-05-20 19:49:31 +00:00
|
|
|
uint64_t total_rows_approx = 0;
|
2022-05-09 10:28:05 +00:00
|
|
|
};
|
|
|
|
|
2022-05-31 14:43:38 +00:00
|
|
|
struct ReadProgress
|
|
|
|
{
|
|
|
|
ReadProgressCounters counters;
|
|
|
|
const StorageLimitsList & limits;
|
|
|
|
};
|
|
|
|
|
2022-05-31 19:40:50 +00:00
|
|
|
/// Set limits for current storage.
|
2022-05-31 14:43:38 +00:00
|
|
|
/// Different limits may be applied to different storages, we need to keep it per processor.
|
|
|
|
/// This method is need to be override only for sources.
|
|
|
|
virtual void setStorageLimits(const std::shared_ptr<const StorageLimitsList> & /*storage_limits*/) {}
|
|
|
|
|
2022-05-09 10:28:05 +00:00
|
|
|
/// This method is called for every processor without input ports.
|
|
|
|
/// Processor can return a new progress for the last read operation.
|
|
|
|
/// You should zero internal counters in the call, in order to make in idempotent.
|
|
|
|
virtual std::optional<ReadProgress> getReadProgress() { return std::nullopt; }
|
|
|
|
|
2020-01-27 14:31:40 +00:00
|
|
|
protected:
|
2020-01-27 14:39:14 +00:00
|
|
|
virtual void onCancel() {}
|
2020-01-27 14:31:40 +00:00
|
|
|
|
2019-08-01 14:25:41 +00:00
|
|
|
private:
|
2022-02-06 11:44:34 +00:00
|
|
|
/// For:
|
|
|
|
/// - elapsed_us
|
|
|
|
friend class ExecutionThreadContext;
|
|
|
|
/// For
|
2022-02-27 10:52:27 +00:00
|
|
|
/// - input_wait_elapsed_us
|
|
|
|
/// - output_wait_elapsed_us
|
2022-02-06 11:44:34 +00:00
|
|
|
friend class ExecutingGraph;
|
|
|
|
|
2019-08-01 14:25:41 +00:00
|
|
|
std::atomic<bool> is_cancelled{false};
|
|
|
|
|
|
|
|
std::string processor_description;
|
2019-09-02 14:49:05 +00:00
|
|
|
|
2022-02-06 11:44:34 +00:00
|
|
|
/// For processors_profile_log
|
|
|
|
uint64_t elapsed_us = 0;
|
2022-02-27 10:52:27 +00:00
|
|
|
Stopwatch input_wait_watch;
|
|
|
|
uint64_t input_wait_elapsed_us = 0;
|
|
|
|
Stopwatch output_wait_watch;
|
|
|
|
uint64_t output_wait_elapsed_us = 0;
|
2022-02-06 11:44:34 +00:00
|
|
|
|
2019-09-02 14:55:43 +00:00
|
|
|
size_t stream_number = NO_STREAM;
|
2019-12-26 16:15:31 +00:00
|
|
|
|
2020-06-26 17:56:33 +00:00
|
|
|
IQueryPlanStep * query_plan_step = nullptr;
|
|
|
|
size_t query_plan_step_group = 0;
|
2018-05-23 20:19:33 +00:00
|
|
|
};
|
|
|
|
|
2019-02-27 11:24:14 +00:00
|
|
|
|
2018-05-23 20:19:33 +00:00
|
|
|
}
|