From 2af874716450b4c53af669931c3c2c17cc486480 Mon Sep 17 00:00:00 2001
From: Alexey Milovidov
Date: Fri, 1 Sep 2017 21:21:01 +0300
Subject: [PATCH] Removed obsolete code [#CLICKHOUSE-3270].

---
 dbms/src/Columns/ColumnAggregateFunction.cpp           |  4 ++--
 dbms/src/Columns/ColumnArray.cpp                       |  2 +-
 dbms/src/Columns/IColumn.h                             |  2 +-
 dbms/src/Common/typeid_cast.h                          |  1 +
 dbms/src/DataStreams/BinaryRowInputStream.cpp          |  2 +-
 .../BlockOutputStreamFromRowOutputStream.cpp           |  2 +-
 dbms/src/DataStreams/CSVRowInputStream.cpp             |  2 +-
 dbms/src/DataStreams/IRowOutputStream.cpp              |  2 +-
 dbms/src/DataStreams/JSONEachRowRowInputStream.cpp     |  4 ++--
 dbms/src/DataStreams/ODBCDriverBlockOutputStream.cpp   |  2 +-
 dbms/src/DataStreams/TSKVRowInputStream.cpp            |  4 ++--
 dbms/src/DataStreams/TabSeparatedBlockOutputStream.cpp |  2 +-
 dbms/src/DataStreams/TabSeparatedRowInputStream.cpp    |  2 +-
 dbms/src/DataStreams/ValuesRowInputStream.cpp          |  4 ++--
 dbms/src/DataStreams/VerticalRowOutputStream.cpp       |  2 +-
 dbms/src/DataTypes/DataTypeAggregateFunction.cpp       |  4 ++--
 dbms/src/Storages/MergeTree/MergeTreeData.cpp          |  4 ++--
 dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp      | 10 +++++-----
 .../Storages/MergeTree/MergeTreeDataSelectExecutor.cpp |  2 +-
 dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp   |  4 ++--
 dbms/src/Storages/MergeTree/MergeTreeReader.cpp        |  2 +-
 .../src/Storages/MergeTree/MergedBlockOutputStream.cpp |  6 +++---
 dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h |  4 ++--
 23 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/dbms/src/Columns/ColumnAggregateFunction.cpp b/dbms/src/Columns/ColumnAggregateFunction.cpp
index 57a83a1fda8..5194a1d8ae8 100644
--- a/dbms/src/Columns/ColumnAggregateFunction.cpp
+++ b/dbms/src/Columns/ColumnAggregateFunction.cpp
@@ -176,7 +176,7 @@ size_t ColumnAggregateFunction::byteSize() const
     size_t res = getData().size() * sizeof(getData()[0]);
 
     for (const auto & arena : arenas)
-        res += arena.get()->size();
+        res += arena->size();
 
     return res;
 }
@@ -188,7 +188,7 @@ size_t ColumnAggregateFunction::allocatedBytes() const
     size_t res = getData().allocated_bytes();
 
     for (const auto & arena : arenas)
-        res += arena.get()->size();
+        res += arena->size();
 
     return res;
 }
diff --git a/dbms/src/Columns/ColumnArray.cpp b/dbms/src/Columns/ColumnArray.cpp
index 3531faef847..bc835df09db 100644
--- a/dbms/src/Columns/ColumnArray.cpp
+++ b/dbms/src/Columns/ColumnArray.cpp
@@ -328,7 +328,7 @@ ColumnPtr ColumnArray::convertToFullColumnIfConst() const
     else
         new_data = data;
 
-    if (auto full_column = offsets.get()->convertToFullColumnIfConst())
+    if (auto full_column = offsets->convertToFullColumnIfConst())
         new_offsets = full_column;
     else
         new_offsets = offsets;
diff --git a/dbms/src/Columns/IColumn.h b/dbms/src/Columns/IColumn.h
index dde614c845f..2b5925f889d 100644
--- a/dbms/src/Columns/IColumn.h
+++ b/dbms/src/Columns/IColumn.h
@@ -129,7 +129,7 @@ public:
     virtual ColumnPtr cut(size_t start, size_t length) const
     {
         ColumnPtr res = cloneEmpty();
-        res.get()->insertRangeFrom(*this, start, length);
+        res->insertRangeFrom(*this, start, length);
         return res;
     }
 
diff --git a/dbms/src/Common/typeid_cast.h b/dbms/src/Common/typeid_cast.h
index e335f8f9672..7f0934ad0cb 100644
--- a/dbms/src/Common/typeid_cast.h
+++ b/dbms/src/Common/typeid_cast.h
@@ -2,6 +2,7 @@
 
 #include 
 #include 
+#include 
 
 #include 
 #include 
diff --git a/dbms/src/DataStreams/BinaryRowInputStream.cpp b/dbms/src/DataStreams/BinaryRowInputStream.cpp
index 3f903118a12..c74d2658a76 100644
--- a/dbms/src/DataStreams/BinaryRowInputStream.cpp
+++ b/dbms/src/DataStreams/BinaryRowInputStream.cpp
@@ -19,7 +19,7 @@ bool BinaryRowInputStream::read(Block & block)
     size_t columns = block.columns();
 
     for (size_t i = 0; i < columns; ++i)
-        block.getByPosition(i).type.get()->deserializeBinary(*block.getByPosition(i).column.get(), istr);
+        block.getByPosition(i).type->deserializeBinary(*block.getByPosition(i).column.get(), istr);
 
     return true;
 }
diff --git a/dbms/src/DataStreams/BlockOutputStreamFromRowOutputStream.cpp b/dbms/src/DataStreams/BlockOutputStreamFromRowOutputStream.cpp
index ab56b9c3ff1..8c466bc3c00 100644
--- a/dbms/src/DataStreams/BlockOutputStreamFromRowOutputStream.cpp
+++ b/dbms/src/DataStreams/BlockOutputStreamFromRowOutputStream.cpp
@@ -28,7 +28,7 @@ void BlockOutputStreamFromRowOutputStream::write(const Block & block)
                 row_output->writeFieldDelimiter();
 
             auto & col = block.getByPosition(j);
-            row_output->writeField(*col.column.get(), *col.type.get(), i);
+            row_output->writeField(*col.column, *col.type, i);
         }
 
         row_output->writeRowEndDelimiter();
diff --git a/dbms/src/DataStreams/CSVRowInputStream.cpp b/dbms/src/DataStreams/CSVRowInputStream.cpp
index 7521abd690e..edfc096b973 100644
--- a/dbms/src/DataStreams/CSVRowInputStream.cpp
+++ b/dbms/src/DataStreams/CSVRowInputStream.cpp
@@ -125,7 +125,7 @@ bool CSVRowInputStream::read(Block & block)
     for (size_t i = 0; i < size; ++i)
     {
         skipWhitespacesAndTabs(istr);
-        data_types[i].get()->deserializeTextCSV(*block.getByPosition(i).column.get(), istr, delimiter);
+        data_types[i]->deserializeTextCSV(*block.getByPosition(i).column.get(), istr, delimiter);
         skipWhitespacesAndTabs(istr);
 
         skipDelimiter(istr, delimiter, i + 1 == size);
diff --git a/dbms/src/DataStreams/IRowOutputStream.cpp b/dbms/src/DataStreams/IRowOutputStream.cpp
index 489e497d125..f5d80bceb2b 100644
--- a/dbms/src/DataStreams/IRowOutputStream.cpp
+++ b/dbms/src/DataStreams/IRowOutputStream.cpp
@@ -17,7 +17,7 @@ void IRowOutputStream::write(const Block & block, size_t row_num)
             writeFieldDelimiter();
 
         auto & col = block.getByPosition(i);
-        writeField(*col.column.get(), *col.type.get(), row_num);
+        writeField(*col.column, *col.type, row_num);
     }
 
     writeRowEndDelimiter();
diff --git a/dbms/src/DataStreams/JSONEachRowRowInputStream.cpp b/dbms/src/DataStreams/JSONEachRowRowInputStream.cpp
index b4d602c7598..69dcc368edb 100644
--- a/dbms/src/DataStreams/JSONEachRowRowInputStream.cpp
+++ b/dbms/src/DataStreams/JSONEachRowRowInputStream.cpp
@@ -120,7 +120,7 @@ bool JSONEachRowRowInputStream::read(Block & block)
         read_columns[index] = true;
 
         auto & col = block.getByPosition(index);
-        col.type.get()->deserializeTextJSON(*col.column.get(), istr);
+        col.type->deserializeTextJSON(*col.column, istr);
     }
 
     skipWhitespaceIfAny(istr);
@@ -130,7 +130,7 @@
     /// Fill non-visited columns with the default values.
     for (size_t i = 0; i < columns; ++i)
         if (!read_columns[i])
-            block.getByPosition(i).column.get()->insertDefault();
+            block.getByPosition(i).column->insertDefault();
 
     return true;
 }
diff --git a/dbms/src/DataStreams/ODBCDriverBlockOutputStream.cpp b/dbms/src/DataStreams/ODBCDriverBlockOutputStream.cpp
index d250d601e98..71161eeb117 100644
--- a/dbms/src/DataStreams/ODBCDriverBlockOutputStream.cpp
+++ b/dbms/src/DataStreams/ODBCDriverBlockOutputStream.cpp
@@ -33,7 +33,7 @@ void ODBCDriverBlockOutputStream::write(const Block & block)
 
             {
                 WriteBufferFromString text_out(text_value);
-                col.type->serializeText(*col.column.get(), i, text_out);
+                col.type->serializeText(*col.column, i, text_out);
             }
 
             writeStringBinary(text_value, out);
diff --git a/dbms/src/DataStreams/TSKVRowInputStream.cpp b/dbms/src/DataStreams/TSKVRowInputStream.cpp
index 9216cddc8b4..fd1d42c283a 100644
--- a/dbms/src/DataStreams/TSKVRowInputStream.cpp
+++ b/dbms/src/DataStreams/TSKVRowInputStream.cpp
@@ -136,7 +136,7 @@ bool TSKVRowInputStream::read(Block & block)
                 read_columns[index] = true;
 
                 auto & col = block.getByPosition(index);
-                col.type.get()->deserializeTextEscaped(*col.column.get(), istr);
+                col.type->deserializeTextEscaped(*col.column, istr);
             }
         }
         else
@@ -177,7 +177,7 @@
     /// Fill in the not met columns with default values.
     for (size_t i = 0; i < columns; ++i)
         if (!read_columns[i])
-            block.getByPosition(i).column.get()->insertDefault();
+            block.getByPosition(i).column->insertDefault();
 
     return true;
 }
diff --git a/dbms/src/DataStreams/TabSeparatedBlockOutputStream.cpp b/dbms/src/DataStreams/TabSeparatedBlockOutputStream.cpp
index 63992a2a860..10863a72234 100644
--- a/dbms/src/DataStreams/TabSeparatedBlockOutputStream.cpp
+++ b/dbms/src/DataStreams/TabSeparatedBlockOutputStream.cpp
@@ -23,7 +23,7 @@ void TabSeparatedBlockOutputStream::write(const Block & block)
         {
            if (j != 0)
                ostr.write('\t');
-            col.type->serializeTextEscaped(*col.column.get(), j, ostr);
+            col.type->serializeTextEscaped(*col.column, j, ostr);
         }
         ostr.write('\n');
     }
diff --git a/dbms/src/DataStreams/TabSeparatedRowInputStream.cpp b/dbms/src/DataStreams/TabSeparatedRowInputStream.cpp
index 35bc8ac89a1..b5cbf6db6d7 100644
--- a/dbms/src/DataStreams/TabSeparatedRowInputStream.cpp
+++ b/dbms/src/DataStreams/TabSeparatedRowInputStream.cpp
@@ -84,7 +84,7 @@ bool TabSeparatedRowInputStream::read(Block & block)
 
     for (size_t i = 0; i < size; ++i)
     {
-        data_types[i].get()->deserializeTextEscaped(*block.getByPosition(i).column.get(), istr);
+        data_types[i]->deserializeTextEscaped(*block.getByPosition(i).column.get(), istr);
 
         /// skip separators
         if (i + 1 == size)
diff --git a/dbms/src/DataStreams/ValuesRowInputStream.cpp b/dbms/src/DataStreams/ValuesRowInputStream.cpp
index 5c795dd123e..1c14de4afa5 100644
--- a/dbms/src/DataStreams/ValuesRowInputStream.cpp
+++ b/dbms/src/DataStreams/ValuesRowInputStream.cpp
@@ -63,7 +63,7 @@ bool ValuesRowInputStream::read(Block & block)
         bool rollback_on_exception = false;
         try
         {
-            col.type.get()->deserializeTextQuoted(*col.column.get(), istr);
+            col.type->deserializeTextQuoted(*col.column, istr);
             rollback_on_exception = true;
 
             skipWhitespaceIfAny(istr);
@@ -94,7 +94,7 @@
                 throw;
 
             if (rollback_on_exception)
-                col.column.get()->popBack(1);
+                col.column->popBack(1);
 
             IDataType & type = *block.safeGetByPosition(i).type;
 
diff --git a/dbms/src/DataStreams/VerticalRowOutputStream.cpp b/dbms/src/DataStreams/VerticalRowOutputStream.cpp
index 2fae4b4dcc2..b4f6428987e 100644
--- a/dbms/src/DataStreams/VerticalRowOutputStream.cpp
+++ b/dbms/src/DataStreams/VerticalRowOutputStream.cpp
@@ -152,7 +152,7 @@ void VerticalRowOutputStream::writeSpecialRow(const Block & block, size_t row_nu
             writeFieldDelimiter();
 
         auto & col = block.getByPosition(i);
-        writeField(*col.column.get(), *col.type.get(), row_num);
+        writeField(*col.column, *col.type, row_num);
     }
 }
 
diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp
index 5da0fd620a0..1906b31304d 100644
--- a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp
+++ b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp
@@ -70,7 +70,7 @@ void DataTypeAggregateFunction::deserializeBinary(Field & field, ReadBuffer & is
 
 void DataTypeAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
 {
-    function.get()->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr);
+    function->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr);
 }
 
 void DataTypeAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer & istr) const
@@ -147,7 +147,7 @@ void DataTypeAggregateFunction::deserializeBinaryBulk(IColumn & column, ReadBuff
 
 static String serializeToString(const AggregateFunctionPtr & function, const IColumn & column, size_t row_num)
 {
     WriteBufferFromOwnString buffer;
-    function.get()->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer);
+    function->serialize(static_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer);
     return buffer.str();
 }
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
index 02864db0f33..e4fc0c51f0a 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp
@@ -809,7 +809,7 @@ void MergeTreeData::createConvertExpression(const DataPartPtr & part, const Name
     {
         if (!new_types.count(column.name))
         {
-            bool is_nullable = column.type.get()->isNullable();
+            bool is_nullable = column.type->isNullable();
 
             if (!part || part->hasColumnFiles(column.name))
             {
@@ -1049,7 +1049,7 @@ MergeTreeData::AlterDataPartTransactionPtr MergeTreeData::alterDataPart(
 
         for (size_t i = 0, size = part->size; i < size; ++i)
             for (size_t j = 0; j < new_key_size; ++j)
-                new_primary_key_sample.getByPosition(j).type.get()->serializeBinary(*new_index[j].get(), i, index_stream);
+                new_primary_key_sample.getByPosition(j).type->serializeBinary(*new_index[j].get(), i, index_stream);
 
         transaction->rename_map["primary.idx.tmp"] = "primary.idx";
 
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp
index 0b86571a25f..3af6d0d8a91 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp
@@ -630,8 +630,8 @@ void MergeTreeDataPart::loadIndex()
 
         for (size_t i = 0; i < key_size; ++i)
         {
-            index[i] = storage.primary_key_data_types[i].get()->createColumn();
-            index[i].get()->reserve(size);
+            index[i] = storage.primary_key_data_types[i]->createColumn();
+            index[i]->reserve(size);
         }
 
         String index_path = getFullPath() + "primary.idx";
@@ -640,12 +640,12 @@
 
         for (size_t i = 0; i < size; ++i)
             for (size_t j = 0; j < key_size; ++j)
-                storage.primary_key_data_types[j].get()->deserializeBinary(*index[j].get(), index_file);
+                storage.primary_key_data_types[j]->deserializeBinary(*index[j].get(), index_file);
 
         for (size_t i = 0; i < key_size; ++i)
-            if (index[i].get()->size() != size)
+            if (index[i]->size() != size)
                throw Exception("Cannot read all data from index file " + index_path
-                    + "(expected size: " + toString(size) + ", read: " + toString(index[i].get()->size()) + ")",
+                    + "(expected size: " + toString(size) + ", read: " + toString(index[i]->size()) + ")",
                     ErrorCodes::CANNOT_READ_ALL_DATA);
 
         if (!index_file.eof())
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
index 68bb0edfa39..04f912a5372 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp
@@ -854,7 +854,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
     MarkRanges res;
 
     size_t used_key_size = key_condition.getMaxKeyColumn() + 1;
-    size_t marks_count = index.at(0).get()->size();
+    size_t marks_count = index.at(0)->size();
 
     /// If index is not used.
     if (key_condition.alwaysUnknownOrTrue())
diff --git a/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp b/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
index 723e208a8d5..6ce836828e8 100644
--- a/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreePartChecker.cpp
@@ -332,7 +332,7 @@ void MergeTreePartChecker::checkDataPart(
             Columns tmp_columns(key_size);
 
            for (size_t j = 0; j < key_size; ++j)
-                tmp_columns[j] = primary_key_data_types[j].get()->createColumn();
+                tmp_columns[j] = primary_key_data_types[j]->createColumn();
 
            while (!hashing_buf.eof())
            {
@@ -341,7 +341,7 @@
 
                ++marks_in_primary_key;
                for (size_t j = 0; j < key_size; ++j)
-                    primary_key_data_types[j].get()->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
+                    primary_key_data_types[j]->deserializeBinary(*tmp_columns[j].get(), hashing_buf);
            }
        }
        else
diff --git a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp
index c1f7b4bea53..5391561013f 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp
@@ -110,7 +110,7 @@ size_t MergeTreeReader::readRows(size_t from_mark, bool continue_reading, size_t
            const IDataType * observed_type;
            bool is_nullable;
 
-            if (column.type.get()->isNullable())
+            if (column.type->isNullable())
            {
                const DataTypeNullable & nullable_type = static_cast<const DataTypeNullable &>(*column.type);
                observed_type = nullable_type.getNestedType().get();
diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
index 4936cd82e49..062dfe4ab33 100644
--- a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
+++ b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp
@@ -500,7 +500,7 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
    {
        index_columns.resize(sort_description.size());
        for (size_t i = 0, size = sort_description.size(); i < size; ++i)
-            index_columns[i] = primary_columns[i].column.get()->cloneEmpty();
+            index_columns[i] = primary_columns[i].column->cloneEmpty();
    }
 
    /// Now write the data.
@@ -545,8 +545,8 @@ void MergedBlockOutputStream::writeImpl(const Block & block, const IColumn::Perm
            for (size_t j = 0, size = primary_columns.size(); j < size; ++j)
            {
                const IColumn & primary_column = *primary_columns[j].column.get();
-                index_columns[j].get()->insertFrom(primary_column, i);
-                primary_columns[j].type.get()->serializeBinary(primary_column, i, *index_stream);
+                index_columns[j]->insertFrom(primary_column, i);
+                primary_columns[j].type->serializeBinary(primary_column, i, *index_stream);
            }
        }
 
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
index a96710db8b8..93b515a4e4c 100644
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h
@@ -28,8 +28,8 @@ private:
    {
        bool operator()(const LogEntryPtr & lhs, const LogEntryPtr & rhs) const
        {
-            return std::forward_as_tuple(lhs.get()->create_time, lhs.get())
-                < std::forward_as_tuple(rhs.get()->create_time, rhs.get());
+            return std::forward_as_tuple(lhs->create_time, lhs.get())
+                < std::forward_as_tuple(rhs->create_time, rhs.get());
        }
    };
 
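
The change throughout the patch is one mechanical simplification: ColumnPtr, DataTypePtr, AggregateFunctionPtr, and the arena/log-entry pointers touched above are smart-pointer types (std::shared_ptr aliases at the time of this commit), so spelling a member access as ptr.get()->member is redundant, because operator-> already returns the same raw pointer that get() does. The standalone sketch below is a minimal illustration of that equivalence, not ClickHouse code: the IColumn, ColumnVector, and ColumnPtr names are simplified stand-ins. It also shows the one case where get() is deliberately kept in the patch, namely when the raw pointer value itself is needed, as in the ReplicatedMergeTreeQueue comparator's tie-breaker.

    // Minimal sketch, assuming only the C++ standard library.
    #include <cassert>
    #include <cstddef>
    #include <memory>

    struct IColumn
    {
        virtual std::size_t size() const = 0;
        virtual ~IColumn() = default;
    };

    struct ColumnVector : IColumn
    {
        std::size_t n = 0;
        std::size_t size() const override { return n; }
    };

    using ColumnPtr = std::shared_ptr<IColumn>;   // stand-in; not the real alias

    int main()
    {
        ColumnPtr column = std::make_shared<ColumnVector>();

        // shared_ptr::operator-> returns the same raw pointer as get(),
        // so the two spellings below call size() on the identical object.
        assert(column.get()->size() == column->size());

        // get() remains useful where a raw pointer value itself is wanted,
        // e.g. as the tie-breaker in the queue comparator above.
        const IColumn * raw = column.get();
        assert(raw == &*column);
    }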