dbms: tiny improvement [#METR-10931].

This commit is contained in:
Alexey Milovidov 2014-04-23 02:43:55 +04:00
parent 6bec60bb39
commit a3caebf31b
6 changed files with 26 additions and 26 deletions

View File

@@ -263,7 +263,7 @@ private:
}
};
typedef std::map<std::string, SharedPtr<Stream> > FileStreams;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
String path;
FileStreams streams;
@@ -293,13 +293,13 @@ private:
+ ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
if (!streams.count(size_name))
streams.insert(std::make_pair(size_name, new Stream(
streams.emplace(size_name, std::unique_ptr<Stream>(new Stream(
path + escaped_size_name, uncompressed_cache, mark_cache)));
addStream(name, *type_arr->getNestedType(), level + 1);
}
else
streams[name] = new Stream(path + escaped_column_name, uncompressed_cache, mark_cache);
streams[name].reset(new Stream(path + escaped_column_name, uncompressed_cache, mark_cache));
}
void readData(const String & name, const IDataType & type, IColumn & column, size_t from_mark, size_t max_rows_to_read,

View File

@@ -70,7 +70,7 @@ protected:
}
};
typedef std::map<String, SharedPtr<ColumnStream> > ColumnStreams;
typedef std::map<String, std::unique_ptr<ColumnStream> > ColumnStreams;
void addStream(const String & path, const String & name, const IDataType & type, size_t level = 0, String filename = "")
{
@@ -88,20 +88,20 @@ protected:
String escaped_size_name = escapeForFileName(DataTypeNested::extractNestedTableName(name))
+ ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
column_streams[size_name] = new ColumnStream(
column_streams[size_name].reset(new ColumnStream(
escaped_size_name,
path + escaped_size_name + ".bin",
path + escaped_size_name + ".mrk",
max_compress_block_size);
max_compress_block_size));
addStream(path, name, *type_arr->getNestedType(), level + 1);
}
else
column_streams[name] = new ColumnStream(
column_streams[name].reset(new ColumnStream(
escaped_column_name,
path + escaped_column_name + ".bin",
path + escaped_column_name + ".mrk",
max_compress_block_size);
max_compress_block_size));
}

View File

@@ -68,7 +68,7 @@ private:
CompressedReadBuffer compressed;
};
typedef std::map<std::string, SharedPtr<Stream> > FileStreams;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
void addStream(const String & name, const IDataType & type, size_t level = 0);
@@ -109,7 +109,7 @@ private:
typedef std::vector<std::pair<size_t, Mark> > MarksForColumns;
typedef std::map<std::string, SharedPtr<Stream> > FileStreams;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
typedef std::set<std::string> OffsetColumns;

View File

@@ -48,7 +48,7 @@ private:
CompressedReadBuffer compressed;
};
typedef std::map<std::string, SharedPtr<Stream> > FileStreams;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
void addStream(const String & name, const IDataType & type, size_t level = 0);
@@ -83,7 +83,7 @@ private:
}
};
typedef std::map<std::string, SharedPtr<Stream> > FileStreams;
typedef std::map<std::string, std::unique_ptr<Stream> > FileStreams;
FileStreams streams;
typedef std::set<std::string> OffsetColumns;

View File

@@ -166,7 +166,7 @@ void LogBlockInputStream::addStream(const String & name, const IDataType & type,
{
String size_name = DataTypeNested::extractNestedTableName(name) + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
if (!streams.count(size_name))
streams.insert(std::make_pair(size_name, new Stream(
streams.emplace(size_name, std::unique_ptr<Stream>(new Stream(
storage.files[size_name].data_file.path(),
mark_number
? storage.files[size_name].marks[mark_number].offset
@@ -177,22 +177,22 @@ void LogBlockInputStream::addStream(const String & name, const IDataType & type,
else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
{
String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
streams[size_name] = new Stream(
streams[size_name].reset(new Stream(
storage.files[size_name].data_file.path(),
mark_number
? storage.files[size_name].marks[mark_number].offset
: 0);
: 0));
const NamesAndTypesList & columns = *type_nested->getNestedTypesList();
for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it)
addStream(DataTypeNested::concatenateNestedName(name, it->first), *it->second, level + 1);
}
else
streams[name] = new Stream(
streams[name].reset(new Stream(
storage.files[name].data_file.path(),
mark_number
? storage.files[name].marks[mark_number].offset
: 0);
: 0));
}
@@ -292,7 +292,7 @@ void LogBlockOutputStream::addStream(const String & name, const IDataType & type
{
String size_name = DataTypeNested::extractNestedTableName(name) + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
if (!streams.count(size_name))
streams.insert(std::make_pair(size_name, new Stream(
streams.emplace(size_name, std::unique_ptr<Stream>(new Stream(
storage.files[size_name].data_file.path(), storage.max_compress_block_size)));
addStream(name, *type_arr->getNestedType(), level + 1);
@@ -300,14 +300,14 @@ void LogBlockOutputStream::addStream(const String & name, const IDataType & type
else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
{
String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
streams[size_name] = new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size);
streams[size_name].reset(new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size));
const NamesAndTypesList & columns = *type_nested->getNestedTypesList();
for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it)
addStream(DataTypeNested::concatenateNestedName(name, it->first), *it->second, level + 1);
}
else
streams[name] = new Stream(storage.files[name].data_file.path(), storage.max_compress_block_size);
streams[name].reset(new Stream(storage.files[name].data_file.path(), storage.max_compress_block_size));
}

View File

@@ -116,21 +116,21 @@ void TinyLogBlockInputStream::addStream(const String & name, const IDataType & t
{
String size_name = DataTypeNested::extractNestedTableName(name) + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
if (!streams.count(size_name))
streams.insert(std::make_pair(size_name, new Stream(storage.files[size_name].data_file.path())));
streams.emplace(size_name, std::unique_ptr<Stream>(new Stream(storage.files[size_name].data_file.path())));
addStream(name, *type_arr->getNestedType(), level + 1);
}
else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
{
String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
streams[size_name] = new Stream(storage.files[size_name].data_file.path());
streams[size_name].reset(new Stream(storage.files[size_name].data_file.path()));
const NamesAndTypesList & columns = *type_nested->getNestedTypesList();
for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it)
addStream(DataTypeNested::concatenateNestedName(name, it->first), *it->second, level + 1);
}
else
streams[name] = new Stream(storage.files[name].data_file.path());
streams[name].reset(new Stream(storage.files[name].data_file.path()));
}
@@ -200,21 +200,21 @@ void TinyLogBlockOutputStream::addStream(const String & name, const IDataType &
{
String size_name = DataTypeNested::extractNestedTableName(name) + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
if (!streams.count(size_name))
streams.insert(std::make_pair(size_name, new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size)));
streams.emplace(size_name, std::unique_ptr<Stream>(new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size)));
addStream(name, *type_arr->getNestedType(), level + 1);
}
else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
{
String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
streams[size_name] = new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size);
streams[size_name].reset(new Stream(storage.files[size_name].data_file.path(), storage.max_compress_block_size));
const NamesAndTypesList & columns = *type_nested->getNestedTypesList();
for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it)
addStream(DataTypeNested::concatenateNestedName(name, it->first), *it->second, level + 1);
}
else
streams[name] = new Stream(storage.files[name].data_file.path(), storage.max_compress_block_size);
streams[name].reset(new Stream(storage.files[name].data_file.path(), storage.max_compress_block_size));
}