mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-12-03 13:02:00 +00:00

commit 2af8747164
parent 1a8e22c37f

    Removed obsolete code [#CLICKHOUSE-3270].
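Every hunk in this commit makes the same mechanical cleanup: expressions spelled ptr.get()->member become ptr->member. The ColumnPtr, DataTypePtr, AggregateFunctionPtr and similar aliases touched here were std::shared_ptr aliases at the time, and shared_ptr's operator-> already returns the raw pointer, so routing the call through get() first is redundant. A minimal sketch of the equivalence, using a hypothetical Column type rather than the real ClickHouse class:

    #include <cassert>
    #include <cstddef>
    #include <memory>

    // Hypothetical stand-in for a ClickHouse interface held through a
    // shared_ptr alias (assumption: mirrors how ColumnPtr worked at the time).
    struct Column
    {
        size_t size() const { return 42; }
    };

    using ColumnPtr = std::shared_ptr<Column>;

    int main()
    {
        ColumnPtr col = std::make_shared<Column>();

        size_t a = col.get()->size(); // obsolete: fetch raw pointer, then call
        size_t b = col->size();       // idiomatic: operator-> forwards to get()

        assert(a == b); // both calls reach the same object
        return 0;
    }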
@@ -176,7 +176,7 @@ size_t ColumnAggregateFunction::byteSize() const
     size_t res = getData().size() * sizeof(getData()[0]);
 
     for (const auto & arena : arenas)
-        res += arena.get()->size();
+        res += arena->size();
 
     return res;
 }
@@ -188,7 +188,7 @@ size_t ColumnAggregateFunction::allocatedBytes() const
     size_t res = getData().allocated_bytes();
 
     for (const auto & arena : arenas)
-        res += arena.get()->size();
+        res += arena->size();
 
     return res;
 }
@@ -328,7 +328,7 @@ ColumnPtr ColumnArray::convertToFullColumnIfConst() const
     else
         new_data = data;
 
-    if (auto full_column = offsets.get()->convertToFullColumnIfConst())
+    if (auto full_column = offsets->convertToFullColumnIfConst())
         new_offsets = full_column;
     else
         new_offsets = offsets;
@@ -129,7 +129,7 @@ public:
     virtual ColumnPtr cut(size_t start, size_t length) const
     {
         ColumnPtr res = cloneEmpty();
-        res.get()->insertRangeFrom(*this, start, length);
+        res->insertRangeFrom(*this, start, length);
         return res;
     }
 
@@ -2,6 +2,7 @@
 
 #include <type_traits>
 #include <typeinfo>
+#include <typeindex>
 #include <string>
 
 #include <Common/Exception.h>
@@ -19,7 +19,7 @@ bool BinaryRowInputStream::read(Block & block)
 
     size_t columns = block.columns();
     for (size_t i = 0; i < columns; ++i)
-        block.getByPosition(i).type.get()->deserializeBinary(*block.getByPosition(i).column.get(), istr);
+        block.getByPosition(i).type->deserializeBinary(*block.getByPosition(i).column.get(), istr);
 
     return true;
 }
@@ -28,7 +28,7 @@ void BlockOutputStreamFromRowOutputStream::write(const Block & block)
                 row_output->writeFieldDelimiter();
 
             auto & col = block.getByPosition(j);
-            row_output->writeField(*col.column.get(), *col.type.get(), i);
+            row_output->writeField(*col.column, *col.type, i);
         }
 
         row_output->writeRowEndDelimiter();
@@ -125,7 +125,7 @@ bool CSVRowInputStream::read(Block & block)
     for (size_t i = 0; i < size; ++i)
     {
         skipWhitespacesAndTabs(istr);
-        data_types[i].get()->deserializeTextCSV(*block.getByPosition(i).column.get(), istr, delimiter);
+        data_types[i]->deserializeTextCSV(*block.getByPosition(i).column.get(), istr, delimiter);
         skipWhitespacesAndTabs(istr);
 
         skipDelimiter(istr, delimiter, i + 1 == size);
@@ -17,7 +17,7 @@ void IRowOutputStream::write(const Block & block, size_t row_num)
             writeFieldDelimiter();
 
         auto & col = block.getByPosition(i);
-        writeField(*col.column.get(), *col.type.get(), row_num);
+        writeField(*col.column, *col.type, row_num);
     }
 
     writeRowEndDelimiter();
@@ -120,7 +120,7 @@ bool JSONEachRowRowInputStream::read(Block & block)
             read_columns[index] = true;
 
             auto & col = block.getByPosition(index);
-            col.type.get()->deserializeTextJSON(*col.column.get(), istr);
+            col.type->deserializeTextJSON(*col.column, istr);
         }
 
         skipWhitespaceIfAny(istr);
@@ -130,7 +130,7 @@ bool JSONEachRowRowInputStream::read(Block & block)
     /// Fill non-visited columns with the default values.
     for (size_t i = 0; i < columns; ++i)
         if (!read_columns[i])
-            block.getByPosition(i).column.get()->insertDefault();
+            block.getByPosition(i).column->insertDefault();
 
     return true;
 }
@@ -33,7 +33,7 @@ void ODBCDriverBlockOutputStream::write(const Block & block)
 
             {
                 WriteBufferFromString text_out(text_value);
-                col.type->serializeText(*col.column.get(), i, text_out);
+                col.type->serializeText(*col.column, i, text_out);
             }
 
             writeStringBinary(text_value, out);
@@ -136,7 +136,7 @@ bool TSKVRowInputStream::read(Block & block)
                     read_columns[index] = true;
 
                     auto & col = block.getByPosition(index);
-                    col.type.get()->deserializeTextEscaped(*col.column.get(), istr);
+                    col.type->deserializeTextEscaped(*col.column, istr);
                 }
             }
             else
@@ -177,7 +177,7 @@ bool TSKVRowInputStream::read(Block & block)
     /// Fill in the not met columns with default values.
     for (size_t i = 0; i < columns; ++i)
         if (!read_columns[i])
-            block.getByPosition(i).column.get()->insertDefault();
+            block.getByPosition(i).column->insertDefault();
 
     return true;
 }
@@ -23,7 +23,7 @@ void TabSeparatedBlockOutputStream::write(const Block & block)
         {
             if (j != 0)
                 ostr.write('\t');
-            col.type->serializeTextEscaped(*col.column.get(), j, ostr);
+            col.type->serializeTextEscaped(*col.column, j, ostr);
         }
         ostr.write('\n');
     }
@@ -84,7 +84,7 @@ bool TabSeparatedRowInputStream::read(Block & block)
 
     for (size_t i = 0; i < size; ++i)
     {
-        data_types[i].get()->deserializeTextEscaped(*block.getByPosition(i).column.get(), istr);
+        data_types[i]->deserializeTextEscaped(*block.getByPosition(i).column.get(), istr);
 
         /// skip separators
         if (i + 1 == size)
@@ -63,7 +63,7 @@ bool ValuesRowInputStream::read(Block & block)
             bool rollback_on_exception = false;
             try
             {
-                col.type.get()->deserializeTextQuoted(*col.column.get(), istr);
+                col.type->deserializeTextQuoted(*col.column, istr);
                 rollback_on_exception = true;
                 skipWhitespaceIfAny(istr);
 
@@ -94,7 +94,7 @@ bool ValuesRowInputStream::read(Block & block)
                     throw;
 
                 if (rollback_on_exception)
-                    col.column.get()->popBack(1);
+                    col.column->popBack(1);
 
                 IDataType & type = *block.safeGetByPosition(i).type;
 
@@ -152,7 +152,7 @@ void VerticalRowOutputStream::writeSpecialRow(const Block & block, size_t row_nu
             writeFieldDelimiter();
 
         auto & col = block.getByPosition(i);
-        writeField(*col.column.get(), *col.type.get(), row_num);
+        writeField(*col.column, *col.type, row_num);
     }
 }
 
@@ -70,7 +70,7 @@ void DataTypeAggregateFunction::deserializeBinary(Field & field, ReadBuffer & is
 
 void DataTypeAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
 {
-    function.get()->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr);
+    function->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr);
 }
 
 void DataTypeAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer & istr) const
@@ -147,7 +147,7 @@ void DataTypeAggregateFunction::deserializeBinaryBulk(IColumn & column, ReadBuff
 static String serializeToString(const AggregateFunctionPtr & function, const IColumn & column, size_t row_num)
 {
     WriteBufferFromOwnString buffer;
-    function.get()->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer);
+    function->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer);
     return buffer.str();
 }
 
@@ -809,7 +809,7 @@ void MergeTreeData::createConvertExpression(const DataPartPtr & part, const Name
     {
         if (!new_types.count(column.name))
         {
-            bool is_nullable = column.type.get()->isNullable();
+            bool is_nullable = column.type->isNullable();
 
             if (!part || part->hasColumnFiles(column.name))
             {
@@ -1049,7 +1049,7 @@ MergeTreeData::AlterDataPartTransactionPtr MergeTreeData::alterDataPart(
 
     for (size_t i = 0, size = part->size; i < size; ++i)
         for (size_t j = 0; j < new_key_size; ++j)
-            new_primary_key_sample.getByPosition(j).type.get()->serializeBinary(*new_index[j].get(), i, index_stream);
+            new_primary_key_sample.getByPosition(j).type->serializeBinary(*new_index[j].get(), i, index_stream);
 
     transaction->rename_map["primary.idx.tmp"] = "primary.idx";
 
@@ -630,8 +630,8 @@ void MergeTreeDataPart::loadIndex()
 
         for (size_t i = 0; i < key_size; ++i)
         {
-            index[i] = storage.primary_key_data_types[i].get()->createColumn();
-            index[i].get()->reserve(size);
+            index[i] = storage.primary_key_data_types[i]->createColumn();
+            index[i]->reserve(size);
         }
 
         String index_path = getFullPath() + "primary.idx";
@@ -640,12 +640,12 @@ void MergeTreeDataPart::loadIndex()
 
         for (size_t i = 0; i < size; ++i)
             for (size_t j = 0; j < key_size; ++j)
-                storage.primary_key_data_types[j].get()->deserializeBinary(*index[j].get(), index_file);
+                storage.primary_key_data_types[j]->deserializeBinary(*index[j].get(), index_file);
 
         for (size_t i = 0; i < key_size; ++i)
-            if (index[i].get()->size() != size)
+            if (index[i]->size() != size)
                 throw Exception("Cannot read all data from index file " + index_path
-                    + "(expected size: " + toString(size) + ", read: " + toString(index[i].get()->size()) + ")",
+                    + "(expected size: " + toString(size) + ", read: " + toString(index[i]->size()) + ")",
                     ErrorCodes::CANNOT_READ_ALL_DATA);
 
         if (!index_file.eof())
@@ -854,7 +854,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
     MarkRanges res;
 
     size_t used_key_size = key_condition.getMaxKeyColumn() + 1;
-    size_t marks_count = index.at(0).get()->size();
+    size_t marks_count = index.at(0)->size();
 
     /// If index is not used.
     if (key_condition.alwaysUnknownOrTrue())
@@ -332,7 +332,7 @@ void MergeTreePartChecker::checkDataPart(
         Columns tmp_columns(key_size);
 
         for (size_t j = 0; j < key_size; ++j)
-            tmp_columns[j] = primary_key_data_types[j].get()->createColumn();
+            tmp_columns[j] = primary_key_data_types[j]->createColumn();
 
         while (!hashing_buf.eof())
         {
@@ -341,7 +341,7 @@ void MergeTreePartChecker::checkDataPart(
 
             ++marks_in_primary_key;
             for (size_t j = 0; j < key_size; ++j)
-                primary_key_data_types[j].get()->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
+                primary_key_data_types[j]->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
         }
     }
     else
@@ -110,7 +110,7 @@ size_t MergeTreeReader::readRows(size_t from_mark, bool continue_reading, size_t
         const IDataType * observed_type;
         bool is_nullable;
 
-        if (column.type.get()->isNullable())
+        if (column.type->isNullable())
         {
             const DataTypeNullable & nullable_type = static_cast<const DataTypeNullable &>(*column.type);
             observed_type = nullable_type.getNestedType().get();
@@ -500,7 +500,7 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
     {
         index_columns.resize(sort_description.size());
         for (size_t i = 0, size = sort_description.size(); i < size; ++i)
-            index_columns[i] = primary_columns[i].column.get()->cloneEmpty();
+            index_columns[i] = primary_columns[i].column->cloneEmpty();
     }
 
     /// Now write the data.
@@ -545,8 +545,8 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
             for (size_t j = 0, size = primary_columns.size(); j < size; ++j)
             {
                 const IColumn & primary_column = *primary_columns[j].column.get();
-                index_columns[j].get()->insertFrom(primary_column, i);
-                primary_columns[j].type.get()->serializeBinary(primary_column, i, *index_stream);
+                index_columns[j]->insertFrom(primary_column, i);
+                primary_columns[j].type->serializeBinary(primary_column, i, *index_stream);
             }
         }
 
@@ -28,8 +28,8 @@ private:
     {
         bool operator()(const LogEntryPtr & lhs, const LogEntryPtr & rhs) const
        {
-            return std::forward_as_tuple(lhs.get()->create_time, lhs.get())
-                < std::forward_as_tuple(rhs.get()->create_time, rhs.get());
+            return std::forward_as_tuple(lhs->create_time, lhs.get())
+                < std::forward_as_tuple(rhs->create_time, rhs.get());
         }
     };
 
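A side note on the last hunk: lhs.get() and rhs.get() survive the cleanup inside the tuples on purpose. There the raw pointer is data, not an access path — std::forward_as_tuple compares lexicographically, so pointer identity acts as a tie-breaker that keeps the ordering strict when two entries share a create_time. A self-contained sketch of that pattern, with a hypothetical Entry type standing in for the real log entry class:

    #include <ctime>
    #include <memory>
    #include <set>
    #include <tuple>

    // Hypothetical stand-in for the queue's log entry type.
    struct Entry
    {
        time_t create_time;
    };

    using EntryPtr = std::shared_ptr<Entry>;

    struct ByTimeThenIdentity
    {
        bool operator()(const EntryPtr & lhs, const EntryPtr & rhs) const
        {
            // Tuples compare element by element: create_time decides first,
            // and the raw pointer is consulted only on equal timestamps.
            return std::forward_as_tuple(lhs->create_time, lhs.get())
                 < std::forward_as_tuple(rhs->create_time, rhs.get());
        }
    };

    int main()
    {
        std::set<EntryPtr, ByTimeThenIdentity> queue;
        queue.insert(std::make_shared<Entry>(Entry{100}));
        queue.insert(std::make_shared<Entry>(Entry{100})); // same time, distinct entry: both kept
        return queue.size() == 2 ? 0 : 1;
    }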