Cleanups [#CLICKHOUSE-2838].

Alexey Milovidov 2017-12-01 16:32:37 +03:00
parent 241e0638b7
commit d4089695ad
5 changed files with 1 addition and 32 deletions

MergeTreeBaseBlockInputStream.cpp

@@ -464,8 +464,6 @@ Block MergeTreeBaseBlockInputStream::readFromPart()
}
space_left -= rows_was_read;
std::cerr << "rows_was_read: " << rows_was_read << ", space_left: " << space_left << "\n";
}
/// In the case of isCancelled.

MergeTreeBaseBlockInputStream.h

@@ -1,4 +1,5 @@
#pragma once
#include <DataStreams/IProfilingBlockInputStream.h>
#include <Storages/MergeTree/MergeTreeBlockReadUtils.h>
#include <Storages/MergeTree/MergeTreeData.h>

MergeTreeRangeReader.cpp

@@ -34,7 +34,6 @@ MergeTreeRangeReader MergeTreeRangeReader::getFutureState(size_t rows_to_read) c
size_t MergeTreeRangeReader::read(Block & res, size_t max_rows_to_read)
{
size_t rows_to_read = numPendingRows();
std::cerr << "rows_to_read: " << rows_to_read << "\n";
rows_to_read = std::min(rows_to_read, max_rows_to_read);
if (rows_to_read == 0)
{
@@ -44,8 +43,6 @@ size_t MergeTreeRangeReader::read(Block & res, size_t max_rows_to_read)
auto read_rows = merge_tree_reader.get().readRows(current_mark, continue_reading, rows_to_read, res);
std::cerr << "read_rows: " << read_rows << "\n";
if (read_rows && read_rows < rows_to_read)
is_reading_finished = true;
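Aside: with the debug output gone, the hunk above still shows the range reader's contract: the request is clamped to the rows that are still pending, and a short read from the underlying reader is taken to mean the range is exhausted. Below is a minimal standalone sketch of that contract (C++17); the Toy* types and their fields are hypothetical stand-ins, not the real MergeTreeRangeReader/MergeTreeReader API.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

/// Hypothetical stand-in for the underlying mark-based reader.
struct ToyUnderlyingReader
{
    std::vector<int> data;
    size_t pos = 0;

    /// May return fewer rows than requested when the data runs out.
    size_t readRows(size_t rows_to_read, std::vector<int> & res)
    {
        size_t read_rows = std::min(rows_to_read, data.size() - pos);
        res.insert(res.end(), data.begin() + pos, data.begin() + pos + read_rows);
        pos += read_rows;
        return read_rows;
    }
};

/// Same shape as the pattern in the hunk: clamp the request to the pending
/// rows, then treat a short read as a signal that the range is exhausted.
struct ToyRangeReader
{
    ToyUnderlyingReader & reader;
    size_t pending_rows;
    bool is_reading_finished = false;

    size_t read(std::vector<int> & res, size_t max_rows_to_read)
    {
        size_t rows_to_read = std::min(pending_rows, max_rows_to_read);
        if (rows_to_read == 0)
            return 0;

        size_t read_rows = reader.readRows(rows_to_read, res);
        pending_rows -= read_rows;

        if (read_rows && read_rows < rows_to_read)
            is_reading_finished = true;   /// the underlying data ended early

        return read_rows;
    }
};

int main()
{
    ToyUnderlyingReader underlying{{1, 2, 3, 4, 5}};
    ToyRangeReader range{underlying, /* pending_rows = */ 8};

    std::vector<int> block;
    while (size_t n = range.read(block, 3))
        std::cout << "read " << n << " rows, finished = " << range.is_reading_finished << "\n";
}
```

On the five-row toy input this prints a full read of 3 rows followed by a short read of 2 rows that flips is_reading_finished; rows_to_read and read_rows are the values the std::cerr lines removed here were printing.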

MergeTreeReader.cpp

@@ -152,16 +152,9 @@ size_t MergeTreeReader::readRows(size_t from_mark, bool continue_reading, size_t
}
if (!append && column.column->size())
{
std::cerr << "Inserting " << column.name << "\n";
res.insert(std::move(column));
}
else
std::cerr << "Not inserting " << column.name << "\n";
}
std::cerr << res.dumpStructure() << "\n";
/// NOTE: positions for all streams must be kept in sync. In particular, even if for some streams there are no rows to be read,
/// you must ensure that no seeks are skipped and at this point they all point to to_mark.
}
@@ -365,15 +358,11 @@ void MergeTreeReader::addStreams(const String & name, const IDataType & type, co
{
String stream_name = IDataType::getFileNameForStream(name, substream_path);
std::cerr << "Adding stream " << stream_name << "\n";
if (streams.count(stream_name))
return;
bool data_file_exists = Poco::File(path + stream_name + DATA_FILE_EXTENSION).exists();
std::cerr << "File exists: " << data_file_exists << "\n";
/** If data file is missing then we will not try to open it.
* It is necessary since it allows to add new column to structure of the table without creating new files for old parts.
*/
@@ -384,8 +373,6 @@ void MergeTreeReader::addStreams(const String & name, const IDataType & type, co
path + stream_name, DATA_FILE_EXTENSION, data_part->marks_count,
all_mark_ranges, mark_cache, save_marks_in_cache,
uncompressed_cache, aio_threshold, max_read_buffer_size, profile_callback, clock_type));
std::cerr << "Added stream " << stream_name << "\n";
};
type.enumerateStreams(callback, {});
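Aside: the comment kept in this hunk explains why a missing data file is not an error: a part written before a column was added simply has no file for it, and the reader must still load such parts (the absent column is filled in with defaults elsewhere). A small sketch of that check, assuming hypothetical ToyPart/ToyStream types rather than the real MergeTreeReader::Stream, with a literal ".bin" standing in for DATA_FILE_EXTENSION.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <string>

/// Hypothetical part directory: just the set of data files it contains.
struct ToyPart
{
    std::set<std::string> files;

    bool hasFile(const std::string & name) const { return files.count(name) > 0; }
};

/// Hypothetical per-column stream; the real one wraps compressed buffers and marks.
struct ToyStream
{
    std::string file_name;
};

/// Mirrors the shape of addStreams(): skip the stream entirely when the data
/// file is absent, so parts created before the column existed still load.
void addStream(std::map<std::string, std::unique_ptr<ToyStream>> & streams,
               const ToyPart & part, const std::string & stream_name)
{
    if (streams.count(stream_name))
        return;                           /// already added via another substream path

    bool data_file_exists = part.hasFile(stream_name + ".bin");
    if (!data_file_exists)
        return;                           /// old part without this column: no stream, no error

    streams[stream_name] = std::make_unique<ToyStream>(ToyStream{stream_name + ".bin"});
}

int main()
{
    /// A part written before the hypothetical "comment" column was added.
    ToyPart old_part{{"id.bin", "value.bin"}};

    std::map<std::string, std::unique_ptr<ToyStream>> streams;
    addStream(streams, old_part, "id");
    addStream(streams, old_part, "comment");   /// silently skipped: comment.bin is missing

    for (const auto & [name, stream] : streams)
        std::cout << "stream for " << name << " -> " << stream->file_name << "\n";
}
```

Only the column that actually has a file ends up with a stream; the caller is left to materialize the rest however it sees fit.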
@@ -399,24 +386,16 @@ void MergeTreeReader::readData(
{
IDataType::InputStreamGetter stream_getter = [&] (const IDataType::SubstreamPath & path) -> ReadBuffer *
{
for (const auto & elem : path)
std::cerr << elem.type << "\n";
std::cerr << "\n";
/// If offsets for arrays have already been read.
if (!with_offsets && !path.empty() && path.back().type == IDataType::Substream::ArraySizes)
return nullptr;
String stream_name = IDataType::getFileNameForStream(name, path);
std::cerr << stream_name << "\n";
auto it = streams.find(stream_name);
if (it == streams.end())
return nullptr;
std::cerr << "!!!\n";
Stream & stream = *it->second;
if (!continue_reading)
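Aside: the stream_getter lambda in this last hunk encodes two skip cases that the removed logging used to trace: substreams whose file is not present in the part, and array-sizes (offsets) substreams that were already read on an earlier pass. A standalone sketch of a getter with the same shape; ToyBuffer and the substream names are made up for illustration.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

/// Hypothetical replacement for ReadBuffer: just a named byte source.
struct ToyBuffer
{
    std::string name;
};

using StreamGetter = std::function<ToyBuffer * (const std::string & substream)>;

int main()
{
    /// Streams that actually exist for this part.
    std::map<std::string, ToyBuffer> streams = {
        {"arr.size0", ToyBuffer{"arr.size0.bin"}},
        {"arr",       ToyBuffer{"arr.bin"}},
    };

    bool with_offsets = false;   /// array sizes were already read on a previous pass

    /// Same shape as the getter in readData(): return nullptr both for
    /// substreams that must be skipped and for files that do not exist,
    /// so the caller simply reads nothing for them.
    StreamGetter getter = [&](const std::string & substream) -> ToyBuffer *
    {
        bool is_sizes_substream = substream.find(".size") != std::string::npos;
        if (!with_offsets && is_sizes_substream)
            return nullptr;                   /// offsets already read earlier

        auto it = streams.find(substream);
        if (it == streams.end())
            return nullptr;                   /// missing file: skip, do not fail

        return &it->second;
    };

    std::vector<std::string> requested = {"arr.size0", "arr", "arr.null"};
    for (const auto & name : requested)
    {
        ToyBuffer * buf = getter(name);
        std::cout << name << " -> " << (buf ? buf->name : "skipped") << "\n";
    }
}
```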

StorageLog.cpp

@@ -275,10 +275,6 @@ void LogBlockInputStream::readData(const String & name, const IDataType & type,
if (storage.files.end() == file_it)
throw Exception("Logical error: no information about file " + stream_name + " in StorageLog", ErrorCodes::LOGICAL_ERROR);
std::cerr << "Stream: " << stream_name << "\n";
std::cerr << "Mark number: " << mark_number << "\n";
std::cerr << "Offset: " << file_it->second.marks[mark_number].offset << "\n";
auto it = streams.try_emplace(stream_name,
file_it->second.data_file.path(),
mark_number
@@ -578,8 +574,6 @@ BlockInputStreams StorageLog::read(
size_t rows_begin = mark_begin ? marks[mark_begin - 1].rows : 0;
size_t rows_end = mark_end ? marks[mark_end - 1].rows : 0;
std::cerr << mark_begin << ", " << mark_end << ", " << rows_begin << ", " << rows_end << "\n";
res.emplace_back(std::make_shared<LogBlockInputStream>(
max_block_size,
column_names,
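Aside: both StorageLog hunks lean on the same mark layout: each mark records the cumulative row count and the byte offset at which a block starts in the column file, so a block stream can seek straight to its range and know how many rows it spans. A small sketch of that arithmetic; ToyMark and the concrete numbers are illustrative, not the actual StorageLog::Mark contents.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

/// Illustrative mark: cumulative rows once this block is written, plus the
/// byte offset at which the block begins in the column data file.
struct ToyMark
{
    size_t rows;
    size_t offset;
};

int main()
{
    /// A column written as three blocks of 4, 4 and 2 rows (offsets made up).
    std::vector<ToyMark> marks = {
        {4, 0},
        {8, 100},
        {10, 200},
    };

    /// A stream assigned marks [mark_begin, mark_end), here the middle block only.
    size_t mark_begin = 1;
    size_t mark_end = 2;

    /// Same arithmetic as in the StorageLog::read() hunk above: cumulative
    /// rows before the start and before the end of the range.
    size_t rows_begin = mark_begin ? marks[mark_begin - 1].rows : 0;
    size_t rows_end = mark_end ? marks[mark_end - 1].rows : 0;

    /// The offset the readData() hunk above was printing: where the first
    /// mark of the range starts, i.e. where the stream has to seek.
    size_t seek_offset = marks[mark_begin].offset;

    std::cout << "seek to byte " << seek_offset
              << ", read " << (rows_end - rows_begin) << " rows"
              << " (rows " << rows_begin << ".." << rows_end << ")\n";
}
```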