Updated test #4246

Alexey Milovidov 2019-02-10 19:55:12 +03:00
parent 4799a343ee
commit 58a6accee5
118 changed files with 213 additions and 176 deletions
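Nearly every hunk below applies the same mechanical substitution: `size_t max_block_size` becomes `UInt64 max_block_size`, from the obfuscator and the XDBC bridge through the merging streams, dictionaries, input formats, interpreters, and storages. The page does not state the motivation, so the following is only a plausible reading: `size_t` follows the platform word size (32 bits on 32-bit targets), while `UInt64` is fixed-width, so the `max_block_size` setting keeps one type along its whole path from parsing to use. A minimal sketch of the difference, assuming `UInt64` is ClickHouse's usual alias for `uint64_t`:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

using UInt64 = uint64_t; // assumed alias, matching ClickHouse's convention

int main()
{
    // size_t follows the platform: 4 bytes on a 32-bit build, 8 on a 64-bit one.
    // UInt64 is 8 bytes everywhere.
    std::printf("sizeof(size_t) = %zu\n", sizeof(std::size_t));
    std::printf("sizeof(UInt64) = %zu\n", sizeof(UInt64));

    // The setting is parsed as a 64-bit value; storing it in size_t would
    // silently truncate on a 32-bit platform.
    UInt64 max_block_size = 8192;
    std::size_t narrowed = static_cast<std::size_t>(max_block_size);
    std::printf("value survives the round-trip here: %zu\n", narrowed);
    return 0;
}
```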

View File

@@ -1037,7 +1037,7 @@ try
Obfuscator obfuscator(header, seed, markov_model_params);
-size_t max_block_size = 8192;
+UInt64 max_block_size = 8192;
/// Train step
{

View File

@@ -75,7 +75,7 @@ void ODBCHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Ne
return;
}
-size_t max_block_size = DEFAULT_BLOCK_SIZE;
+UInt64 max_block_size = DEFAULT_BLOCK_SIZE;
if (params.has("max_block_size"))
{
std::string max_block_size_str = params.get("max_block_size", "");

View File

@@ -33,7 +33,7 @@ class IXDBCBridgeHelper
public:
static constexpr inline auto DEFAULT_FORMAT = "RowBinary";
-virtual std::vector<std::pair<std::string, std::string>> getURLParams(const std::string & cols, size_t max_block_size) const = 0;
+virtual std::vector<std::pair<std::string, std::string>> getURLParams(const std::string & cols, UInt64 max_block_size) const = 0;
virtual void startBridgeSync() const = 0;
virtual Poco::URI getMainURI() const = 0;
virtual Poco::URI getColumnsInfoURI() const = 0;
@@ -127,7 +127,7 @@ public:
/**
* @todo leaky abstraction - used by external API's
*/
-std::vector<std::pair<std::string, std::string>> getURLParams(const std::string & cols, size_t max_block_size) const override
+std::vector<std::pair<std::string, std::string>> getURLParams(const std::string & cols, UInt64 max_block_size) const override
{
std::vector<std::pair<std::string, std::string>> result;

View File

@@ -13,7 +13,7 @@ namespace ErrorCodes
AggregatingSortedBlockInputStream::AggregatingSortedBlockInputStream(
-const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_)
+const BlockInputStreams & inputs_, const SortDescription & description_, UInt64 max_block_size_)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_)
{
/// Fill in the column numbers that need to be aggregated.

View File

@@ -22,7 +22,7 @@ class AggregatingSortedBlockInputStream : public MergingSortedBlockInputStream
{
public:
AggregatingSortedBlockInputStream(
-const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_);
+const BlockInputStreams & inputs_, const SortDescription & description_, UInt64 max_block_size_);
String getName() const override { return "AggregatingSorted"; }

View File

@@ -25,7 +25,7 @@ class CollapsingSortedBlockInputStream : public MergingSortedBlockInputStream
public:
CollapsingSortedBlockInputStream(
BlockInputStreams inputs_, const SortDescription & description_,
-const String & sign_column, size_t max_block_size_, WriteBuffer * out_row_sources_buf_ = nullptr)
+const String & sign_column, UInt64 max_block_size_, WriteBuffer * out_row_sources_buf_ = nullptr)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_, 0, out_row_sources_buf_)
{
sign_column_number = header.getPositionByName(sign_column);

View File

@@ -13,7 +13,7 @@ namespace ErrorCodes
GraphiteRollupSortedBlockInputStream::GraphiteRollupSortedBlockInputStream(
-const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_,
+const BlockInputStreams & inputs_, const SortDescription & description_, UInt64 max_block_size_,
const Graphite::Params & params, time_t time_of_merge)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_),
params(params), time_of_merge(time_of_merge)

View File

@@ -127,7 +127,7 @@ class GraphiteRollupSortedBlockInputStream : public MergingSortedBlockInputStrea
{
public:
GraphiteRollupSortedBlockInputStream(
-const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_,
+const BlockInputStreams & inputs_, const SortDescription & description_, UInt64 max_block_size_,
const Graphite::Params & params, time_t time_of_merge);
String getName() const override { return "GraphiteRollupSorted"; }

View File

@@ -17,7 +17,7 @@ namespace ErrorCodes
MergingSortedBlockInputStream::MergingSortedBlockInputStream(
const BlockInputStreams & inputs_, const SortDescription & description_,
-size_t max_block_size_, UInt64 limit_, WriteBuffer * out_row_sources_buf_, bool quiet_)
+UInt64 max_block_size_, UInt64 limit_, WriteBuffer * out_row_sources_buf_, bool quiet_)
: description(description_), max_block_size(max_block_size_), limit(limit_), quiet(quiet_)
, source_blocks(inputs_.size()), cursors(inputs_.size()), out_row_sources_buf(out_row_sources_buf_)
{

View File

@@ -68,7 +68,7 @@ public:
* quiet - don't log profiling info
*/
MergingSortedBlockInputStream(
-const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_,
+const BlockInputStreams & inputs_, const SortDescription & description_, UInt64 max_block_size_,
UInt64 limit_ = 0, WriteBuffer * out_row_sources_buf_ = nullptr, bool quiet_ = false);
String getName() const override { return "MergingSorted"; }
@@ -133,7 +133,7 @@ protected:
Block header;
const SortDescription description;
-const size_t max_block_size;
+const UInt64 max_block_size;
UInt64 limit;
size_t total_merged_rows = 0;

View File

@@ -17,7 +17,7 @@ class ReplacingSortedBlockInputStream : public MergingSortedBlockInputStream
public:
ReplacingSortedBlockInputStream(
const BlockInputStreams & inputs_, const SortDescription & description_,
-const String & version_column, size_t max_block_size_, WriteBuffer * out_row_sources_buf_ = nullptr)
+const String & version_column, UInt64 max_block_size_, WriteBuffer * out_row_sources_buf_ = nullptr)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_, 0, out_row_sources_buf_)
{
if (!version_column.empty())

View File

@@ -44,7 +44,7 @@ SummingSortedBlockInputStream::SummingSortedBlockInputStream(
const SortDescription & description_,
/// List of columns to be summed. If empty, all numeric columns that are not in the description are taken.
const Names & column_names_to_sum,
-size_t max_block_size_)
+UInt64 max_block_size_)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_)
{
current_row.resize(num_columns);

View File

@@ -30,7 +30,7 @@ public:
const SortDescription & description_,
/// List of columns to be summed. If empty, all numeric columns that are not in the description are taken.
const Names & column_names_to_sum_,
-size_t max_block_size_);
+UInt64 max_block_size_);
String getName() const override { return "SummingSorted"; }

View File

@@ -15,7 +15,7 @@ namespace ErrorCodes
VersionedCollapsingSortedBlockInputStream::VersionedCollapsingSortedBlockInputStream(
const BlockInputStreams & inputs_, const SortDescription & description_,
-const String & sign_column_, size_t max_block_size_,
+const String & sign_column_, UInt64 max_block_size_,
WriteBuffer * out_row_sources_buf_)
: MergingSortedBlockInputStream(inputs_, description_, max_block_size_, 0, out_row_sources_buf_)
, max_rows_in_queue(std::min(std::max<size_t>(3, max_block_size_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 2)

View File

@@ -176,7 +176,7 @@ public:
/// max_rows_in_queue should be about max_block_size_ if we won't store a lot of extra blocks (RowRef holds SharedBlockPtr).
VersionedCollapsingSortedBlockInputStream(
const BlockInputStreams & inputs_, const SortDescription & description_,
-const String & sign_column_, size_t max_block_size_,
+const String & sign_column_, UInt64 max_block_size_,
WriteBuffer * out_row_sources_buf_ = nullptr);
String getName() const override { return "VersionedCollapsingSorted"; }

View File

@@ -575,7 +575,7 @@ PaddedPODArray<CacheDictionary::Key> CacheDictionary::getCachedIds() const
return array;
}
-BlockInputStreamPtr CacheDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr CacheDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using BlockInputStreamType = DictionaryBlockInputStream<CacheDictionary, Key>;
return std::make_shared<BlockInputStreamType>(shared_from_this(), max_block_size, getCachedIds(), column_names);

View File

@@ -149,7 +149,7 @@ public:
void has(const PaddedPODArray<Key> & ids, PaddedPODArray<UInt8> & out) const override;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -383,7 +383,7 @@ bool ComplexKeyCacheDictionary::isEmptyCell(const UInt64 idx) const
&& (idx != zero_cell_idx || cells[idx].data == ext::safe_bit_cast<CellMetadata::time_point_urep_t>(CellMetadata::time_point_t())));
}
-BlockInputStreamPtr ComplexKeyCacheDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr ComplexKeyCacheDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
std::vector<StringRef> keys;
{

View File

@@ -180,7 +180,7 @@ public:
void has(const Columns & key_columns, const DataTypes & key_types, PaddedPODArray<UInt8> & out) const;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -784,7 +784,7 @@ std::vector<StringRef> ComplexKeyHashedDictionary::getKeys(const Attribute & att
return keys;
}
-BlockInputStreamPtr ComplexKeyHashedDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr ComplexKeyHashedDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using BlockInputStreamType = DictionaryBlockInputStream<ComplexKeyHashedDictionary, UInt64>;
return std::make_shared<BlockInputStreamType>(shared_from_this(), max_block_size, getKeys(), column_names);

View File

@@ -153,7 +153,7 @@ public:
void has(const Columns & key_columns, const DataTypes & key_types, PaddedPODArray<UInt8> & out) const;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -32,11 +32,11 @@ public:
using DictionaryPtr = std::shared_ptr<DictionaryType const>;
DictionaryBlockInputStream(
-std::shared_ptr<const IDictionaryBase> dictionary, size_t max_block_size, PaddedPODArray<Key> && ids, const Names & column_names);
+std::shared_ptr<const IDictionaryBase> dictionary, UInt64 max_block_size, PaddedPODArray<Key> && ids, const Names & column_names);
DictionaryBlockInputStream(
std::shared_ptr<const IDictionaryBase> dictionary,
-size_t max_block_size,
+UInt64 max_block_size,
const std::vector<StringRef> & keys,
const Names & column_names);
@@ -48,7 +48,7 @@ public:
// Now used in trie dictionary, where columns are stored as ip and mask, and are showed as string
DictionaryBlockInputStream(
std::shared_ptr<const IDictionaryBase> dictionary,
-size_t max_block_size,
+UInt64 max_block_size,
const Columns & data_columns,
const Names & column_names,
GetColumnsFunction && get_key_columns_function,
@@ -202,7 +202,7 @@ private:
template <typename DictionaryType, typename Key>
DictionaryBlockInputStream<DictionaryType, Key>::DictionaryBlockInputStream(
-std::shared_ptr<const IDictionaryBase> dictionary, size_t max_block_size, PaddedPODArray<Key> && ids, const Names & column_names)
+std::shared_ptr<const IDictionaryBase> dictionary, UInt64 max_block_size, PaddedPODArray<Key> && ids, const Names & column_names)
: DictionaryBlockInputStreamBase(ids.size(), max_block_size)
, dictionary(std::static_pointer_cast<const DictionaryType>(dictionary))
, column_names(column_names)
@@ -217,7 +217,7 @@ DictionaryBlockInputStream<DictionaryType, Key>::DictionaryBlockInputStream(
template <typename DictionaryType, typename Key>
DictionaryBlockInputStream<DictionaryType, Key>::DictionaryBlockInputStream(
std::shared_ptr<const IDictionaryBase> dictionary,
-size_t max_block_size,
+UInt64 max_block_size,
const std::vector<StringRef> & keys,
const Names & column_names)
: DictionaryBlockInputStreamBase(keys.size(), max_block_size)
@@ -234,7 +234,7 @@ DictionaryBlockInputStream<DictionaryType, Key>::DictionaryBlockInputStream(
template <typename DictionaryType, typename Key>
DictionaryBlockInputStream<DictionaryType, Key>::DictionaryBlockInputStream(
std::shared_ptr<const IDictionaryBase> dictionary,
-size_t max_block_size,
+UInt64 max_block_size,
const Columns & data_columns,
const Names & column_names,
GetColumnsFunction && get_key_columns_function,

View File

@@ -2,7 +2,7 @@
namespace DB
{
-DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size)
+DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count, UInt64 max_block_size)
: rows_count(rows_count), max_block_size(max_block_size)
{
}

View File

@@ -7,7 +7,7 @@ namespace DB
class DictionaryBlockInputStreamBase : public IBlockInputStream
{
protected:
-DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size);
+DictionaryBlockInputStreamBase(size_t rows_count, UInt64 max_block_size);
virtual Block getBlock(size_t start, size_t length) const = 0;
@@ -15,7 +15,7 @@ protected:
private:
const size_t rows_count;
-const size_t max_block_size;
+const UInt64 max_block_size;
size_t next_row = 0;
Block readImpl() override;
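This pair of hunks draws the boundary the whole commit follows: `rows_count`, the size of an in-memory container, stays `size_t`, while `max_block_size`, a user-facing setting, becomes `UInt64`. Below is a rough sketch of the paging such a base class performs over the members shown above; the `PagingState` struct and `nextRange` helper are illustrative, not the class's real interface:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>

using UInt64 = uint64_t; // assumed alias

// Illustrative paging state mirroring the members shown in the hunks.
struct PagingState
{
    const std::size_t rows_count;  // in-memory size: stays size_t
    const UInt64 max_block_size;   // user setting: fixed-width UInt64
    std::size_t next_row = 0;

    // Start and length of the next block; length == 0 means exhausted.
    std::pair<std::size_t, std::size_t> nextRange()
    {
        const std::size_t start = next_row;
        // Compare in UInt64, then narrow: the result is bounded by
        // rows_count, so the cast back to size_t is safe.
        const std::size_t length = static_cast<std::size_t>(
            std::min<UInt64>(max_block_size, rows_count - next_row));
        next_row += length;
        return {start, length};
    }
};
```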

View File

@@ -15,7 +15,7 @@
namespace DB
{
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
namespace

View File

@@ -9,7 +9,7 @@
namespace DB
{
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
FileDictionarySource::FileDictionarySource(

View File

@@ -749,7 +749,7 @@ PaddedPODArray<FlatDictionary::Key> FlatDictionary::getIds() const
return ids;
}
-BlockInputStreamPtr FlatDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr FlatDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using BlockInputStreamType = DictionaryBlockInputStream<FlatDictionary, Key>;
return std::make_shared<BlockInputStreamType>(shared_from_this(), max_block_size, getIds(), column_names);

View File

@@ -144,7 +144,7 @@ public:
void has(const PaddedPODArray<Key> & ids, PaddedPODArray<UInt8> & out) const override;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -15,7 +15,7 @@
namespace DB
{
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
HTTPDictionarySource::HTTPDictionarySource(

View File

@@ -753,7 +753,7 @@ PaddedPODArray<HashedDictionary::Key> HashedDictionary::getIds() const
return PaddedPODArray<Key>();
}
-BlockInputStreamPtr HashedDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr HashedDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using BlockInputStreamType = DictionaryBlockInputStream<HashedDictionary, Key>;
return std::make_shared<BlockInputStreamType>(shared_from_this(), max_block_size, getIds(), column_names);

View File

@@ -144,7 +144,7 @@ public:
void isInVectorConstant(const PaddedPODArray<Key> & child_ids, const Key ancestor_id, PaddedPODArray<UInt8> & out) const override;
void isInConstantVector(const Key child_id, const PaddedPODArray<Key> & ancestor_ids, PaddedPODArray<UInt8> & out) const override;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -46,7 +46,7 @@ struct IDictionaryBase : public IExternalLoadable
virtual bool isInjective(const std::string & attribute_name) const = 0;
-virtual BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const = 0;
+virtual BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const = 0;
bool supportUpdates() const override { return !isCached(); }

View File

@@ -33,7 +33,7 @@ MongoDBBlockInputStream::MongoDBBlockInputStream(
std::shared_ptr<Poco::MongoDB::Connection> & connection_,
std::unique_ptr<Poco::MongoDB::Cursor> cursor_,
const Block & sample_block,
-const size_t max_block_size)
+const UInt64 max_block_size)
: connection(connection_), cursor{std::move(cursor_)}, max_block_size{max_block_size}
{
description.init(sample_block);

View File

@@ -25,7 +25,7 @@ public:
std::shared_ptr<Poco::MongoDB::Connection> & connection_,
std::unique_ptr<Poco::MongoDB::Cursor> cursor_,
const Block & sample_block,
-const size_t max_block_size);
+const UInt64 max_block_size);
~MongoDBBlockInputStream() override;
@@ -38,7 +38,7 @@ private:
std::shared_ptr<Poco::MongoDB::Connection> connection;
std::unique_ptr<Poco::MongoDB::Cursor> cursor;
-const size_t max_block_size;
+const UInt64 max_block_size;
ExternalResultDescription description;
bool all_read = false;
};

View File

@@ -63,7 +63,7 @@ namespace ErrorCodes
}
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
# if POCO_VERSION < 0x01070800

View File

@@ -20,7 +20,7 @@ namespace ErrorCodes
MySQLBlockInputStream::MySQLBlockInputStream(
-const mysqlxx::PoolWithFailover::Entry & entry, const std::string & query_str, const Block & sample_block, const size_t max_block_size)
+const mysqlxx::PoolWithFailover::Entry & entry, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size)
: entry{entry}, query{this->entry->query(query_str)}, result{query.use()}, max_block_size{max_block_size}
{
if (sample_block.columns() != result.getNumFields())

View File

@@ -18,7 +18,7 @@ public:
const mysqlxx::PoolWithFailover::Entry & entry,
const std::string & query_str,
const Block & sample_block,
-const size_t max_block_size);
+const UInt64 max_block_size);
String getName() const override { return "MySQL"; }
@@ -30,7 +30,7 @@ private:
mysqlxx::PoolWithFailover::Entry entry;
mysqlxx::Query query;
mysqlxx::UseQueryResult result;
-const size_t max_block_size;
+const UInt64 max_block_size;
ExternalResultDescription description;
};

View File

@@ -50,7 +50,7 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)
namespace DB
{
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
MySQLDictionarySource::MySQLDictionarySource(

View File

@@ -18,7 +18,7 @@ namespace ErrorCodes
ODBCBlockInputStream::ODBCBlockInputStream(
-Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const size_t max_block_size)
+Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size)
: session{session}
, statement{(this->session << query_str, Poco::Data::Keywords::now)}
, result{statement}

View File

@@ -16,7 +16,7 @@ class ODBCBlockInputStream final : public IBlockInputStream
{
public:
ODBCBlockInputStream(
-Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const size_t max_block_size);
+Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size);
String getName() const override { return "ODBC"; }
@@ -30,7 +30,7 @@ private:
Poco::Data::RecordSet result;
Poco::Data::RecordSet::Iterator iterator;
-const size_t max_block_size;
+const UInt64 max_block_size;
ExternalResultDescription description;
Poco::Logger * log;

View File

@@ -25,7 +25,7 @@ public:
RangeDictionaryBlockInputStream(
DictionaryPtr dictionary,
-size_t max_block_size,
+UInt64 max_block_size,
const Names & column_names,
PaddedPODArray<Key> && ids_to_fill,
PaddedPODArray<RangeType> && start_dates,

View File

@@ -634,7 +634,7 @@ void RangeHashedDictionary::getIdsAndDates(
template <typename RangeType>
-BlockInputStreamPtr RangeHashedDictionary::getBlockInputStreamImpl(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr RangeHashedDictionary::getBlockInputStreamImpl(const Names & column_names, UInt64 max_block_size) const
{
PaddedPODArray<Key> ids;
PaddedPODArray<RangeType> start_dates;
@@ -652,7 +652,7 @@ struct RangeHashedDIctionaryCallGetBlockInputStreamImpl
BlockInputStreamPtr stream;
const RangeHashedDictionary * dict;
const Names * column_names;
-size_t max_block_size;
+UInt64 max_block_size;
template <typename RangeType, size_t>
void operator()()
@@ -663,7 +663,7 @@ struct RangeHashedDIctionaryCallGetBlockInputStreamImpl
}
};
-BlockInputStreamPtr RangeHashedDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr RangeHashedDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using ListType = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64, Int128, Float32, Float64>;

View File

@@ -93,7 +93,7 @@ public:
const PaddedPODArray<RangeStorageType> & dates,
ColumnString * out) const;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
struct Range
{
@@ -210,7 +210,7 @@ private:
PaddedPODArray<RangeType> & end_dates) const;
template <typename RangeType>
-BlockInputStreamPtr getBlockInputStreamImpl(const Names & column_names, size_t max_block_size) const;
+BlockInputStreamPtr getBlockInputStreamImpl(const Names & column_names, UInt64 max_block_size) const;
friend struct RangeHashedDIctionaryCallGetBlockInputStreamImpl;

View File

@@ -758,7 +758,7 @@ Columns TrieDictionary::getKeyColumns() const
return {std::move(ip_column), std::move(mask_column)};
}
-BlockInputStreamPtr TrieDictionary::getBlockInputStream(const Names & column_names, size_t max_block_size) const
+BlockInputStreamPtr TrieDictionary::getBlockInputStream(const Names & column_names, UInt64 max_block_size) const
{
using BlockInputStreamType = DictionaryBlockInputStream<TrieDictionary, UInt64>;

View File

@@ -155,7 +155,7 @@ public:
void has(const Columns & key_columns, const DataTypes & key_types, PaddedPODArray<UInt8> & out) const;
-BlockInputStreamPtr getBlockInputStream(const Names & column_names, size_t max_block_size) const override;
+BlockInputStreamPtr getBlockInputStream(const Names & column_names, UInt64 max_block_size) const override;
private:
template <typename Value>

View File

@@ -39,7 +39,7 @@ namespace
std::function<void(std::ostream &)> callback,
const Block & sample_block,
const Context & context,
-size_t max_block_size,
+UInt64 max_block_size,
const ConnectionTimeouts & timeouts,
const String name)
: name(name)
@@ -62,7 +62,7 @@ namespace
};
}
-static const size_t max_block_size = 8192;
+static const UInt64 max_block_size = 8192;
XDBCDictionarySource::XDBCDictionarySource(

View File

@@ -33,7 +33,7 @@ void registerInputFormatRowBinary(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -26,7 +26,7 @@ namespace ErrorCodes
BlockInputStreamFromRowInputStream::BlockInputStreamFromRowInputStream(
const RowInputStreamPtr & row_input_,
const Block & sample_,
-size_t max_block_size_,
+UInt64 max_block_size_,
const FormatSettings & settings)
: row_input(row_input_), sample(sample_), max_block_size(max_block_size_),
allow_errors_num(settings.input_allow_errors_num), allow_errors_ratio(settings.input_allow_errors_ratio)

View File

@@ -21,7 +21,7 @@ public:
BlockInputStreamFromRowInputStream(
const RowInputStreamPtr & row_input_,
const Block & sample_,
-size_t max_block_size_,
+UInt64 max_block_size_,
const FormatSettings & settings);
void readPrefix() override { row_input->readPrefix(); }
@@ -41,7 +41,7 @@ protected:
private:
RowInputStreamPtr row_input;
Block sample;
-size_t max_block_size;
+UInt64 max_block_size;
BlockMissingValues block_missing_values;
UInt64 allow_errors_num;

View File

@@ -353,7 +353,7 @@ void registerInputFormatCSV(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -295,7 +295,7 @@ void registerInputFormatCapnProto(FormatFactory & factory)
{
factory.registerInputFormat(
"CapnProto",
-[](ReadBuffer & buf, const Block & sample, const Context & context, size_t max_block_size, const FormatSettings & settings)
+[](ReadBuffer & buf, const Block & sample, const Context & context, UInt64 max_block_size, const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(
std::make_shared<CapnProtoRowInputStream>(buf, sample, FormatSchemaInfo(context, "capnp")),

View File

@@ -27,7 +27,7 @@ const FormatFactory::Creators & FormatFactory::getCreators(const String & name)
}
-BlockInputStreamPtr FormatFactory::getInput(const String & name, ReadBuffer & buf, const Block & sample, const Context & context, size_t max_block_size) const
+BlockInputStreamPtr FormatFactory::getInput(const String & name, ReadBuffer & buf, const Block & sample, const Context & context, UInt64 max_block_size) const
{
const auto & input_getter = getCreators(name).first;
if (!input_getter)

View File

@@ -34,7 +34,7 @@ private:
ReadBuffer & buf,
const Block & sample,
const Context & context,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)>;
using OutputCreator = std::function<BlockOutputStreamPtr(
@@ -49,7 +49,7 @@ private:
public:
BlockInputStreamPtr getInput(const String & name, ReadBuffer & buf,
-const Block & sample, const Context & context, size_t max_block_size) const;
+const Block & sample, const Context & context, UInt64 max_block_size) const;
BlockOutputStreamPtr getOutput(const String & name, WriteBuffer & buf,
const Block & sample, const Context & context) const;

View File

@@ -258,7 +258,7 @@ void registerInputFormatJSONEachRow(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -197,7 +197,7 @@ void registerInputFormatTSKV(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -331,7 +331,7 @@ void registerInputFormatTabSeparated(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(
@@ -346,7 +346,7 @@ void registerInputFormatTabSeparated(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(
@@ -361,7 +361,7 @@ void registerInputFormatTabSeparated(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context &,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -154,7 +154,7 @@ void registerInputFormatValues(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const Context & context,
-size_t max_block_size,
+UInt64 max_block_size,
const FormatSettings & settings)
{
return std::make_shared<BlockInputStreamFromRowInputStream>(

View File

@@ -1698,7 +1698,7 @@ void Context::checkPartitionCanBeDropped(const String & database, const String &
}
-BlockInputStreamPtr Context::getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, size_t max_block_size) const
+BlockInputStreamPtr Context::getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, UInt64 max_block_size) const
{
return FormatFactory::instance().getInput(name, buf, sample, *this, max_block_size);
}

View File

@@ -262,7 +262,7 @@ public:
void tryCreateExternalModels() const;
/// I/O formats.
-BlockInputStreamPtr getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, size_t max_block_size) const;
+BlockInputStreamPtr getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, UInt64 max_block_size) const;
BlockOutputStreamPtr getOutputFormat(const String & name, WriteBuffer & buf, const Block & sample) const;
InterserverIOHandler & getInterserverIOHandler();

View File

@@ -1109,7 +1109,7 @@ void ExpressionActions::optimizeArrayJoin()
}
-BlockInputStreamPtr ExpressionActions::createStreamWithNonJoinedDataIfFullOrRightJoin(const Block & source_header, size_t max_block_size) const
+BlockInputStreamPtr ExpressionActions::createStreamWithNonJoinedDataIfFullOrRightJoin(const Block & source_header, UInt64 max_block_size) const
{
for (const auto & action : actions)
if (action.join && (action.join->getKind() == ASTTableJoin::Kind::Full || action.join->getKind() == ASTTableJoin::Kind::Right))

View File

@@ -238,7 +238,7 @@ public:
static std::string getSmallestColumn(const NamesAndTypesList & columns);
-BlockInputStreamPtr createStreamWithNonJoinedDataIfFullOrRightJoin(const Block & source_header, size_t max_block_size) const;
+BlockInputStreamPtr createStreamWithNonJoinedDataIfFullOrRightJoin(const Block & source_header, UInt64 max_block_size) const;
const Settings & getSettings() const { return settings; }

View File

@@ -924,10 +924,7 @@ void InterpreterSelectQuery::executeFetchColumns(
max_streams = settings.max_distributed_connections;
}
-size_t max_block_size = settings.max_block_size;
-if (!max_block_size)
-throw Exception("Setting 'max_block_size' cannot be zero", ErrorCodes::PARAMETER_OUT_OF_BOUND);
+UInt64 max_block_size = settings.max_block_size;
auto [limit_length, limit_offset] = getLimitLengthAndOffset(query, context);
@@ -946,10 +943,13 @@ void InterpreterSelectQuery::executeFetchColumns(
&& !query_analyzer->hasAggregation()
&& limit_length + limit_offset < max_block_size)
{
-max_block_size = limit_length + limit_offset;
+max_block_size = std::max(UInt64(1), limit_length + limit_offset);
max_streams = 1;
}
+if (!max_block_size)
+throw Exception("Setting 'max_block_size' cannot be zero", ErrorCodes::PARAMETER_OUT_OF_BOUND);
/// Initialize the initial data streams to which the query transforms are superimposed. Table or subquery or prepared input?
if (!pipeline.streams.empty())
{
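Unlike the rest of the diff, the two hunks above also move logic: the zero-check on max_block_size now runs after the LIMIT-based clamp instead of before it, and the clamp is floored at one via std::max(UInt64(1), limit_length + limit_offset), so a query with LIMIT 0 can no longer drive the computed block size to zero. A hedged condensation of that control flow; the function name and the can_shortcut flag are illustrative, not ClickHouse API:

```cpp
#include <algorithm>
#include <cstdint>
#include <stdexcept>

using UInt64 = uint64_t; // assumed alias

// Illustrative condensation of the hunks above; not the real signature.
UInt64 chooseBlockSize(UInt64 setting_value, UInt64 limit_length, UInt64 limit_offset, bool can_shortcut)
{
    UInt64 max_block_size = setting_value;

    // Small-LIMIT shortcut: read just enough rows, but never fewer than one.
    if (can_shortcut && limit_length + limit_offset < max_block_size)
        max_block_size = std::max(UInt64(1), limit_length + limit_offset);

    // The validation now runs after the clamp, so it only rejects a bad
    // user setting (max_block_size = 0), never the clamped value.
    if (!max_block_size)
        throw std::runtime_error("Setting 'max_block_size' cannot be zero");

    return max_block_size;
}
```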

View File

@@ -1106,7 +1106,7 @@ class NonJoinedBlockInputStream : public IBlockInputStream
{
public:
NonJoinedBlockInputStream(const Join & parent_, const Block & left_sample_block, const Names & key_names_left,
-const NamesAndTypesList & columns_added_by_join, size_t max_block_size_)
+const NamesAndTypesList & columns_added_by_join, UInt64 max_block_size_)
: parent(parent_), max_block_size(max_block_size_)
{
/** left_sample_block contains keys and "left" columns.
@@ -1183,7 +1183,7 @@ protected:
private:
const Join & parent;
-size_t max_block_size;
+UInt64 max_block_size;
Block result_sample_block;
/// Indices of columns in result_sample_block that come from the left-side table (except key columns).
@@ -1334,7 +1334,7 @@ private:
BlockInputStreamPtr Join::createStreamWithNonJoinedRows(const Block & left_sample_block, const Names & key_names_left,
-const NamesAndTypesList & columns_added_by_join, size_t max_block_size) const
+const NamesAndTypesList & columns_added_by_join, UInt64 max_block_size) const
{
return std::make_shared<NonJoinedBlockInputStream>(*this, left_sample_block, key_names_left, columns_added_by_join, max_block_size);
}

View File

@@ -120,7 +120,7 @@ public:
* left_sample_block is passed without account of 'use_nulls' setting (columns will be converted to Nullable inside).
*/
BlockInputStreamPtr createStreamWithNonJoinedRows(const Block & left_sample_block, const Names & key_names_left,
-const NamesAndTypesList & columns_added_by_join, size_t max_block_size) const;
+const NamesAndTypesList & columns_added_by_join, UInt64 max_block_size) const;
/// Number of keys in all built JOIN maps.
size_t getTotalRowCount() const;

View File

@@ -134,7 +134,7 @@ public:
class KafkaBlockInputStream : public IBlockInputStream
{
public:
-KafkaBlockInputStream(StorageKafka & storage_, const Context & context_, const String & schema, size_t max_block_size_)
+KafkaBlockInputStream(StorageKafka & storage_, const Context & context_, const String & schema, UInt64 max_block_size_)
: storage(storage_), context(context_), max_block_size(max_block_size_)
{
// Always skip unknown fields regardless of the context (JSON or TSKV)
@@ -222,7 +222,7 @@ private:
StorageKafka & storage;
ConsumerPtr consumer;
Context context;
-size_t max_block_size;
+UInt64 max_block_size;
Block sample_block;
std::unique_ptr<ReadBufferFromKafkaConsumer> read_buf;
BlockInputStreamPtr reader;
@@ -254,7 +254,7 @@ StorageKafka::StorageKafka(
const ColumnsDescription & columns_,
const String & brokers_, const String & group_, const Names & topics_,
const String & format_name_, char row_delimiter_, const String & schema_name_,
-size_t num_consumers_, size_t max_block_size_, size_t skip_broken_)
+size_t num_consumers_, UInt64 max_block_size_, size_t skip_broken_)
: IStorage{columns_},
table_name(table_name_), database_name(database_name_), global_context(context_),
topics(global_context.getMacros()->expand(topics_)),
@@ -724,7 +724,7 @@ void registerStorageKafka(StorageFactory & factory)
}
// Parse max block size (optional)
-size_t max_block_size = 0;
+UInt64 max_block_size = 0;
if (args_count >= 8)
{
auto ast = typeid_cast<const ASTLiteral *>(engine_args[7].get());

View File

@@ -41,7 +41,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override
@@ -68,7 +68,7 @@ private:
/// Total number of consumers
size_t num_consumers;
/// Maximum block size for insertion into this table
-size_t max_block_size;
+UInt64 max_block_size;
/// Number of actually created consumers.
/// Can differ from num_consumers in case of exception in startup() (or if startup() hasn't been called).
/// In this case we still need to be able to shutdown() properly.
@@ -103,7 +103,7 @@ protected:
const ColumnsDescription & columns_,
const String & brokers_, const String & group_, const Names & topics_,
const String & format_name_, char row_delimiter_, const String & schema_name_,
-size_t num_consumers_, size_t max_block_size_, size_t skip_broken);
+size_t num_consumers_, UInt64 max_block_size_, size_t skip_broken);
};
}

View File

@@ -140,7 +140,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::read(
const Names & column_names_to_return,
const SelectQueryInfo & query_info,
const Context & context,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read) const
{
@@ -154,7 +154,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::readFromParts(
const Names & column_names_to_return,
const SelectQueryInfo & query_info,
const Context & context,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read) const
{
@@ -624,7 +624,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
RangesInDataParts && parts,
size_t num_streams,
const Names & column_names,
-size_t max_block_size,
+UInt64 max_block_size,
bool use_uncompressed_cache,
const PrewhereInfoPtr & prewhere_info,
const Names & virt_columns,
@@ -766,7 +766,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreams(
BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal(
RangesInDataParts && parts,
const Names & column_names,
-size_t max_block_size,
+UInt64 max_block_size,
bool use_uncompressed_cache,
const PrewhereInfoPtr & prewhere_info,
const Names & virt_columns,

View File

@@ -28,7 +28,7 @@ public:
const Names & column_names,
const SelectQueryInfo & query_info,
const Context & context,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read = nullptr) const;
@@ -37,7 +37,7 @@ public:
const Names & column_names,
const SelectQueryInfo & query_info,
const Context & context,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read = nullptr) const;
@@ -50,7 +50,7 @@ private:
RangesInDataParts && parts,
size_t num_streams,
const Names & column_names,
-size_t max_block_size,
+UInt64 max_block_size,
bool use_uncompressed_cache,
const PrewhereInfoPtr & prewhere_info,
const Names & virt_columns,
@@ -59,7 +59,7 @@ private:
BlockInputStreams spreadMarkRangesAmongStreamsFinal(
RangesInDataParts && parts,
const Names & column_names,
-size_t max_block_size,
+UInt64 max_block_size,
bool use_uncompressed_cache,
const PrewhereInfoPtr & prewhere_info,
const Names & virt_columns,

View File

@@ -16,7 +16,7 @@ namespace ErrorCodes
MergeTreeSelectBlockInputStream::MergeTreeSelectBlockInputStream(
const MergeTreeData & storage_,
const MergeTreeData::DataPartPtr & owned_data_part_,
-size_t max_block_size_rows_,
+UInt64 max_block_size_rows_,
size_t preferred_block_size_bytes_,
size_t preferred_max_column_in_block_size_bytes_,
Names column_names,

View File

@@ -19,7 +19,7 @@ public:
MergeTreeSelectBlockInputStream(
const MergeTreeData & storage,
const MergeTreeData::DataPartPtr & owned_data_part,
-size_t max_block_size_rows,
+UInt64 max_block_size_rows,
size_t preferred_block_size_bytes,
size_t preferred_max_column_in_block_size_bytes,
Names column_names,

View File

@@ -11,7 +11,7 @@ MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream(
const size_t thread,
const MergeTreeReadPoolPtr & pool,
const size_t min_marks_to_read_,
-const size_t max_block_size_rows_,
+const UInt64 max_block_size_rows_,
size_t preferred_block_size_bytes_,
size_t preferred_max_column_in_block_size_bytes_,
const MergeTreeData & storage_,

View File

@@ -18,7 +18,7 @@ public:
const size_t thread,
const std::shared_ptr<MergeTreeReadPool> & pool,
const size_t min_marks_to_read,
-const size_t max_block_size,
+const UInt64 max_block_size,
size_t preferred_block_size_bytes,
size_t preferred_max_column_in_block_size_bytes,
const MergeTreeData & storage,

View File

@@ -23,7 +23,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override
{
return MergeTreeDataSelectExecutor(part->storage).readFromParts(

View File

@@ -138,7 +138,7 @@ BlockInputStreams StorageBuffer::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams)
{
BlockInputStreams streams_from_dst;

View File

@@ -61,7 +61,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -30,7 +30,7 @@ class CatBoostDatasetBlockInputStream : public IBlockInputStream
public:
CatBoostDatasetBlockInputStream(const std::string & file_name, const std::string & format_name,
-const Block & sample_block, const Context & context, size_t max_block_size)
+const Block & sample_block, const Context & context, UInt64 max_block_size)
: file_name(file_name), format_name(format_name)
{
read_buf = std::make_unique<ReadBufferFromFile>(file_name);
@@ -262,7 +262,7 @@ BlockInputStreams StorageCatBoostPool::read(const Names & column_names,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned /*threads*/)
{
auto stream = std::make_shared<CatBoostDatasetBlockInputStream>(

View File

@@ -18,7 +18,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned threads) override;
private:

View File

@@ -46,7 +46,7 @@ BlockInputStreams StorageDictionary::read(
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned /*threads*/)
{
auto dictionary = context.getExternalDictionaries().getDictionary(dictionary_name);

View File

@@ -28,7 +28,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size = DEFAULT_BLOCK_SIZE,
+UInt64 max_block_size = DEFAULT_BLOCK_SIZE,
unsigned threads = 1) override;
void drop() override {}

View File

@@ -68,7 +68,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -116,7 +116,7 @@ StorageFile::StorageFile(
class StorageFileBlockInputStream : public IBlockInputStream
{
public:
-StorageFileBlockInputStream(StorageFile & storage_, const Context & context, size_t max_block_size)
+StorageFileBlockInputStream(StorageFile & storage_, const Context & context, UInt64 max_block_size)
: storage(storage_)
{
if (storage.use_table_fd)
@@ -187,7 +187,7 @@ BlockInputStreams StorageFile::read(
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned /*num_streams*/)
{
BlockInputStreamPtr block_input = std::make_shared<StorageFileBlockInputStream>(*this, context, max_block_size);

View File

@@ -36,7 +36,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(

View File

@@ -48,7 +48,7 @@ public:
const String & format,
const Block & sample_block,
const Context & context,
-size_t max_block_size)
+UInt64 max_block_size)
{
std::unique_ptr<ReadBuffer> read_buf = std::make_unique<ReadBufferFromHDFS>(uri);
auto input_stream = FormatFactory::instance().getInput(format, *read_buf, sample_block, context, max_block_size);
@@ -133,7 +133,7 @@ BlockInputStreams StorageHDFS::read(
const SelectQueryInfo & /*query_info*/,
const Context & context_,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned /*num_streams*/)
{
return {std::make_shared<HDFSBlockInputStream>(

View File

@@ -30,7 +30,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -194,7 +194,7 @@ size_t rawSize(const StringRef & t)
class JoinBlockInputStream : public IBlockInputStream
{
public:
-JoinBlockInputStream(const Join & parent_, size_t max_block_size_, Block && sample_block_)
+JoinBlockInputStream(const Join & parent_, UInt64 max_block_size_, Block && sample_block_)
: parent(parent_), lock(parent.rwlock), max_block_size(max_block_size_), sample_block(std::move(sample_block_))
{
columns.resize(sample_block.columns());
@@ -239,7 +239,7 @@ protected:
private:
const Join & parent;
std::shared_lock<std::shared_mutex> lock;
-size_t max_block_size;
+UInt64 max_block_size;
Block sample_block;
ColumnNumbers column_indices;
@@ -362,7 +362,7 @@ BlockInputStreams StorageJoin::read(
const SelectQueryInfo & /*query_info*/,
const Context & /*context*/,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned /*num_streams*/)
{
check(column_names);

View File

@@ -38,7 +38,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
private:

View File

@@ -578,7 +578,7 @@ BlockInputStreams StorageLog::read(
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams)
{
check(column_names);

View File

@@ -31,7 +31,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -186,7 +186,7 @@ BlockInputStreams StorageMaterializedView::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams)
{
auto storage = getTargetTable();

View File

@@ -54,7 +54,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
String getDataPath() const override;

View File

@@ -33,7 +33,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -177,7 +177,7 @@ BlockInputStreams StorageMerge::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams)
{
BlockInputStreams res;
@@ -260,7 +260,7 @@ BlockInputStreams StorageMerge::read(
}
BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
-const size_t max_block_size, const Block & header, const StoragePtr & storage,
+const UInt64 max_block_size, const Block & header, const StoragePtr & storage,
const TableStructureReadLockPtr & struct_lock, Names & real_column_names,
Context & modified_context, size_t streams_num, bool has_table_virtual_column,
bool concat_streams)

View File

@@ -36,7 +36,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
void drop() override {}
@@ -75,7 +75,7 @@ protected:
const Context & context, QueryProcessingStage::Enum processed_stage);
BlockInputStreams createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
-const size_t max_block_size, const Block & header, const StoragePtr & storage,
+const UInt64 max_block_size, const Block & header, const StoragePtr & storage,
const TableStructureReadLockPtr & struct_lock, Names & real_column_names,
Context & modified_context, size_t streams_num, bool has_table_virtual_column,
bool concat_streams = false);

View File

@@ -118,7 +118,7 @@ BlockInputStreams StorageMergeTree::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams)
{
return reader.read(column_names, query_info, context, max_block_size, num_streams);

View File

@@ -54,7 +54,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -52,7 +52,7 @@ BlockInputStreams StorageMySQL::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned)
{
check(column_names);

View File

@@ -37,7 +37,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -2920,7 +2920,7 @@ BlockInputStreams StorageReplicatedMergeTree::read(
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum /*processed_stage*/,
-const size_t max_block_size,
+const UInt64 max_block_size,
const unsigned num_streams)
{
const Settings & settings = context.getSettingsRef();

View File

@@ -109,7 +109,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

View File

@@ -33,7 +33,7 @@ public:
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
+UInt64 max_block_size,
unsigned num_streams) override;
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;

Some files were not shown because too many files have changed in this diff.