Remove debug print

This commit is contained in:
alesapin 2019-03-27 19:50:01 +03:00
parent 5528edea93
commit e449760fde
12 changed files with 28 additions and 27 deletions

View File

@@ -195,7 +195,7 @@ void CollapsingSortedBlockInputStream::merge(MutableColumns & merged_columns, st
}
/// Write data for last primary key.
insertRows(merged_columns, current_block_granularity, stop_condition);
insertRows(merged_columns, /*some_granularity*/ 0, stop_condition);
finished = true;
}

View File

@@ -132,7 +132,7 @@ Block MergeTreeBaseSelectBlockInputStream::readFromPart()
{
if (reader->getColumns().empty())
{
std::cerr << "EMPTY COLUMNS\n";
//std::cerr << "EMPTY COLUMNS\n";
task->range_reader = MergeTreeRangeReader(
pre_reader.get(), nullptr,
prewhere_info->alias_actions, prewhere_info->prewhere_actions,
@@ -141,12 +141,12 @@ Block MergeTreeBaseSelectBlockInputStream::readFromPart()
}
else
{
std::cerr << "MORE INTERESTING COLUMNS\n";
//std::cerr << "MORE INTERESTING COLUMNS\n";
MergeTreeRangeReader * pre_reader_ptr = nullptr;
if (pre_reader != nullptr)
{
std::cerr << "SETTING PREREADER\n";
std::cerr << "PreReader is NULL:" << (pre_reader == nullptr) << std::endl;
//std::cerr << "SETTING PREREADER\n";
//std::cerr << "PreReader is NULL:" << (pre_reader == nullptr) << std::endl;
task->pre_range_reader = MergeTreeRangeReader(
pre_reader.get(), nullptr,
prewhere_info->alias_actions, prewhere_info->prewhere_actions,
@@ -155,7 +155,7 @@ Block MergeTreeBaseSelectBlockInputStream::readFromPart()
pre_reader_ptr = &task->pre_range_reader;
}
std::cerr << "Reader is NULL:" << (reader == nullptr) << std::endl;
//std::cerr << "Reader is NULL:" << (reader == nullptr) << std::endl;
task->range_reader = MergeTreeRangeReader(
reader.get(), pre_reader_ptr, nullptr, nullptr,
nullptr, &task->ordered_names, true, false, true);

View File

@@ -1479,7 +1479,7 @@ MergeTreeData::AlterDataPartTransactionPtr MergeTreeData::alterDataPart(
transaction->new_columns.writeText(columns_file);
transaction->rename_map["columns.txt.tmp"] = "columns.txt";
}
std::cerr << "ALTER FINISHED\n";
//std::cerr << "ALTER FINISHED\n";
return transaction;
}

View File

@@ -767,7 +767,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
rows_sources_read_buf.seek(0, 0);
ColumnGathererStream column_gathered_stream(column_name, column_part_streams, rows_sources_read_buf);
std::cerr << "TOINDEXGRANULARITY MARKS COUNT:" << to.getIndexGranularity().getMarksCount() << std::endl;
//std::cerr << "TOINDEXGRANULARITY MARKS COUNT:" << to.getIndexGranularity().getMarksCount() << std::endl;
MergedColumnOnlyOutputStream column_to(
data,
column_gathered_stream.getHeader(),
@@ -878,7 +878,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
new_data_part->is_temp = true;
String new_part_tmp_path = new_data_part->getFullPath();
std::cerr << "NEW TEMP PART:" << new_part_tmp_path << std::endl;
//std::cerr << "NEW TEMP PART:" << new_part_tmp_path << std::endl;
/// Note: this is done before creating input streams, because otherwise data.data_parts_mutex
/// (which is locked in data.getTotalActiveSizeInBytes()) is locked after part->columns_lock

View File

@@ -217,15 +217,15 @@ String MergeTreeDataPart::getColumnNameWithMinumumCompressedSize() const
for (const auto & column : storage_columns)
{
std::cerr << "Searching for column:" << column.name << std::endl;
//std::cerr << "Searching for column:" << column.name << std::endl;
if (!hasColumnFiles(column.name))
{
std::cerr << "No column files:" << column.name << std::endl;
//std::cerr << "No column files:" << column.name << std::endl;
continue;
}
const auto size = getColumnSize(column.name, *column.type).data_compressed;
std::cerr << "Column size:" <<size<<std::endl;
//std::cerr << "Column size:" <<size<<std::endl;
if (size < minimum_size)
{
minimum_size = size;
@@ -518,14 +518,14 @@ void MergeTreeDataPart::loadIndexGranularity()
/// old version of marks with static index granularity
if (!granularity_info.is_adaptive)
{
std::cerr << "(1)SET MARKS SIZE FOR:" << marks_file_path << " TO:" << granularity_info.mark_size_in_bytes << std::endl;
//std::cerr << "(1)SET MARKS SIZE FOR:" << marks_file_path << " TO:" << granularity_info.mark_size_in_bytes << std::endl;
size_t marks_count = marks_file_size / granularity_info.mark_size_in_bytes;
std::cerr << "Marks file size:" << marks_file_size << " Marks count:" << marks_count << std::endl;
//std::cerr << "Marks file size:" << marks_file_size << " Marks count:" << marks_count << std::endl;
index_granularity.resizeWithFixedGranularity(marks_count, granularity_info.fixed_index_granularity); /// all the same
}
else
{
std::cerr << "(2)SET MARKS SIZE FOR:" << marks_file_path << " TO:" << granularity_info.mark_size_in_bytes << std::endl;
//std::cerr << "(2)SET MARKS SIZE FOR:" << marks_file_path << " TO:" << granularity_info.mark_size_in_bytes << std::endl;
ReadBufferFromFile buffer(marks_file_path, marks_file_size, -1);
while (!buffer.eof())
{
@@ -842,12 +842,12 @@ bool MergeTreeDataPart::hasColumnFiles(const String & column) const
/// That's Ok under assumption that files exist either for all or for no streams.
String prefix = getFullPath();
std::cerr << "ColumnPrefix:" << prefix << std::endl;
//std::cerr << "ColumnPrefix:" << prefix << std::endl;
String escaped_column = escapeForFileName(column);
std::cerr << "Escaped name:" << escaped_column << std::endl;
std::cerr << "Marks file extension:" << storage.index_granularity_info.marks_file_extension << std::endl;
//std::cerr << "Escaped name:" << escaped_column << std::endl;
//std::cerr << "Marks file extension:" << storage.index_granularity_info.marks_file_extension << std::endl;
return Poco::File(prefix + escaped_column + ".bin").exists()
&& Poco::File(prefix + escaped_column + storage.index_granularity_info.marks_file_extension).exists();
}

View File

@@ -159,7 +159,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::readFromParts(
const unsigned num_streams,
const PartitionIdToMaxBlock * max_block_numbers_to_read) const
{
std::cerr << "START READING FROM PARTS\n";
//std::cerr << "START READING FROM PARTS\n";
size_t part_index = 0;
/// If query contains restrictions on the virtual column `_part` or `_part_index`, select only parts suitable for it.

View File

@@ -581,8 +581,8 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::startReadingChain(size_t
{
if (stream.isFinished())
{
std::cerr << "STREAM IS FINISHED\n";
std::cerr << "MAKRSCOUNT:" << merge_tree_reader->data_part->index_granularity.getMarksCount() << std::endl;
//std::cerr << "STREAM IS FINISHED\n";
//std::cerr << "MAKRSCOUNT:" << merge_tree_reader->data_part->index_granularity.getMarksCount() << std::endl;
result.addRows(stream.finalize(result.block));
stream = Stream(ranges.back().begin, ranges.back().end, merge_tree_reader);
result.addRange(ranges.back());

View File

@@ -28,7 +28,7 @@ MergeTreeReadPool::MergeTreeReadPool(
column_names{column_names}, do_not_steal_tasks{do_not_steal_tasks},
predict_block_size_bytes{preferred_block_size_bytes > 0}, prewhere_info{prewhere_info}, parts_ranges{parts}
{
std::cerr << "PARTS SIZE:" << parts.size() << std::endl;
//std::cerr << "PARTS SIZE:" << parts.size() << std::endl;
avg_parts_granularity = getAvgGranularityForAllPartsRanges(parts);
/// reverse from right-to-left to left-to-right

View File

@@ -30,11 +30,11 @@ MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream(
if (max_block_size_rows)
{
size_t avg_granularity = pool->getAvgGranularityForReadingParts();
std::cerr << "AVG GRANULARITY:" << avg_granularity << std::endl;
//std::cerr << "AVG GRANULARITY:" << avg_granularity << std::endl;
min_marks_to_read = (min_marks_to_read_ * avg_granularity + max_block_size_rows - 1)
/ max_block_size_rows * max_block_size_rows / avg_granularity;
std::cerr << "MIN MARKS TO READ:" << min_marks_to_read << std::endl;
//std::cerr << "MIN MARKS TO READ:" << min_marks_to_read << std::endl;
}
else
min_marks_to_read = min_marks_to_read_;

View File

@@ -345,8 +345,8 @@ MergeTreeData::DataPart::Checksums checkDataPart(
}, settings.path);
size_t rows_after_mark = adaptive_index_granularity.getMarkRows(mark_num);
std::cerr << "rows after mark:" << rows_after_mark << std::endl;
std::cerr << "mark_num:" << mark_num << std::endl;
//std::cerr << "rows after mark:" << rows_after_mark << std::endl;
//std::cerr << "mark_num:" << mark_num << std::endl;
++mark_num;
/// Read index_granularity rows from column.

View File

@@ -51,6 +51,8 @@ SELECT count() FROM test.merge_tree WHERE toUInt64(x) IN (0, 0);
DROP TABLE test.merge_tree;
SELECT '----00804----';
SET max_rows_to_read = 0;
SET force_primary_key = 0;
DROP TABLE IF EXISTS test.large_alter_table;
DROP TABLE IF EXISTS test.store_of_hash;