remove some dead and commented code

Alexander Tokmakov 2022-07-21 15:05:48 +02:00
parent 35eb6c9067
commit a8da5d96fc
30 changed files with 0 additions and 191 deletions

View File

@@ -509,7 +509,6 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
if (referrals)
{
SCOPE_EXIT({
// ldap_value_free(referrals);
ber_memvfree(reinterpret_cast<void **>(referrals));
referrals = nullptr;
});
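
The SCOPE_EXIT above frees the OpenLDAP referral array on every exit path, which is what made the stale `// ldap_value_free(referrals);` note safe to drop. For readers unfamiliar with the idiom, a minimal standalone scope-guard sketch (hypothetical ScopeGuard type, not ClickHouse's actual SCOPE_EXIT implementation):

#include <utility>

template <typename F>
class ScopeGuard
{
    F cleanup;
public:
    explicit ScopeGuard(F f) : cleanup(std::move(f)) {}
    ScopeGuard(const ScopeGuard &) = delete;
    ~ScopeGuard() { cleanup(); }
};

void useGuard()
{
    char ** referrals = nullptr; /// imagine this was filled by ldap_parse_result
    ScopeGuard guard([&]
    {
        /// runs exactly once, on any exit path (return or exception);
        /// the real code calls ber_memvfree here before nulling the pointer
        referrals = nullptr;
    });
    /// ... work with referrals; early returns and throws still trigger cleanup ...
}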

View File

@@ -152,7 +152,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src)
auto & dst_column_host_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["host_name"]]);
auto & dst_array_current_time = typeid_cast<ColumnUInt32 &>(*mutable_columns[name_pos["current_time"]]).getData();
// auto & dst_array_thread_id = typeid_cast<ColumnUInt64 &>(*mutable_columns[name_pos["thread_id"]]).getData();
auto & dst_array_type = typeid_cast<ColumnInt8 &>(*mutable_columns[name_pos["type"]]).getData();
auto & dst_column_name = typeid_cast<ColumnString &>(*mutable_columns[name_pos["name"]]);
auto & dst_array_value = typeid_cast<ColumnInt64 &>(*mutable_columns[name_pos["value"]]).getData();

View File

@@ -132,14 +132,12 @@ namespace
ColumnLowCardinality::ColumnLowCardinality(MutableColumnPtr && column_unique_, MutableColumnPtr && indexes_, bool is_shared)
: dictionary(std::move(column_unique_), is_shared), idx(std::move(indexes_))
{
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insert(const Field & x)
{
compactIfSharedDictionary();
idx.insertPosition(dictionary.getColumnUnique().uniqueInsert(x));
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insertDefault()
@@ -167,15 +165,12 @@ void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n)
const auto & nested = *low_cardinality_src->getDictionary().getNestedColumn();
idx.insertPosition(dictionary.getColumnUnique().uniqueInsertFrom(nested, position));
}
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n)
{
compactIfSharedDictionary();
idx.insertPosition(dictionary.getColumnUnique().uniqueInsertFrom(src, n));
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length)
@@ -205,7 +200,6 @@ void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, si
auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(*used_keys, 0, used_keys->size());
idx.insertPositionsRange(*inserted_indexes->index(*sub_idx, 0), 0, length);
}
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insertRangeFromFullColumn(const IColumn & src, size_t start, size_t length)
@@ -213,7 +207,6 @@ void ColumnLowCardinality::insertRangeFromFullColumn(const IColumn & src, size_t
compactIfSharedDictionary();
auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(src, start, length);
idx.insertPositionsRange(*inserted_indexes, 0, length);
// idx.check(getDictionary().size());
}
static void checkPositionsAreLimited(const IColumn & positions, UInt64 limit)
@@ -254,14 +247,12 @@ void ColumnLowCardinality::insertRangeFromDictionaryEncodedColumn(const IColumn
compactIfSharedDictionary();
auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(keys, 0, keys.size());
idx.insertPositionsRange(*inserted_indexes->index(positions, 0), 0, positions.size());
// idx.check(getDictionary().size());
}
void ColumnLowCardinality::insertData(const char * pos, size_t length)
{
compactIfSharedDictionary();
idx.insertPosition(dictionary.getColumnUnique().uniqueInsertData(pos, length));
// idx.check(getDictionary().size());
}
StringRef ColumnLowCardinality::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
@@ -276,7 +267,6 @@ const char * ColumnLowCardinality::deserializeAndInsertFromArena(const char * po
const char * new_pos;
idx.insertPosition(dictionary.getColumnUnique().uniqueDeserializeAndInsertFromArena(pos, new_pos));
// idx.check(getDictionary().size());
return new_pos;
}
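
Each deleted `// idx.check(getDictionary().size());` was a debug-only assertion; the retained `checkPositionsAreLimited` visible in the context above is the production check. A hedged sketch of the invariant both enforce (standalone helper, not the real signature):

#include <cstdint>
#include <stdexcept>
#include <vector>

/// Every position stored in a LowCardinality index column must reference
/// an existing dictionary entry, i.e. be strictly below the dictionary size.
void checkPositionsSketch(const std::vector<uint64_t> & positions, uint64_t dictionary_size)
{
    for (uint64_t position : positions)
        if (position >= dictionary_size)
            throw std::logic_error("LowCardinality index is out of dictionary bounds");
}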

View File

@@ -273,14 +273,6 @@ llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, l
b.CreateCondBr(lhs_or_rhs_are_null, lhs_or_rhs_are_null_block, lhs_rhs_are_not_null_block);
// if (unlikely(lval_is_null || rval_is_null))
// {
// if (lval_is_null && rval_is_null)
// return 0;
// else
// return lval_is_null ? null_direction_hint : -null_direction_hint;
// }
b.SetInsertPoint(lhs_or_rhs_are_null_block);
auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0);
llvm::Value * lhs_and_rhs_are_null = b.CreateAnd(lhs_is_null_value, rhs_is_null_value);
@@ -288,8 +280,6 @@ llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, l
llvm::Value * lhs_or_rhs_are_null_block_result = b.CreateSelect(lhs_and_rhs_are_null, lhs_equals_rhs_result, lhs_is_null_result);
b.CreateBr(join_block);
// getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
b.SetInsertPoint(lhs_rhs_are_not_null_block);
llvm::Value * lhs_rhs_are_not_null_block_result
= nested_column->compileComparator(builder, lhs_unwrapped_value, rhs_unwrapped_value, nan_direction_hint);
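
The deleted comment block spelled out, in scalar form, exactly what the IR built here computes. A plain C++ restatement of that semantics (hypothetical helper, for illustration only):

/// Scalar equivalent of the three IR blocks above (hypothetical helper).
int compareNullable(bool lhs_is_null, bool rhs_is_null, int nested_result, int null_direction_hint)
{
    if (lhs_is_null || rhs_is_null)                  /// lhs_or_rhs_are_null_block
    {
        if (lhs_is_null && rhs_is_null)
            return 0;                                /// two NULLs compare equal
        return lhs_is_null ? null_direction_hint : -null_direction_hint;
    }
    return nested_result;                            /// lhs_rhs_are_not_null_block
}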

View File

@@ -548,7 +548,6 @@ MutableColumnPtr ColumnUnique<ColumnType>::uniqueInsertRangeImpl(
}
}
// checkIndexes(*positions_column, column->size() + (overflowed_keys ? overflowed_keys->size() : 0));
return std::move(positions_column);
}

View File

@@ -514,8 +514,6 @@ private:
return allocateFromFreeRegion(*free_region, size);
}
// std::cerr << "Requested size: " << size << "\n";
/// Evict something from cache and continue.
while (true)
{

View File

@@ -122,9 +122,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor;
values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor;
// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n";
// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n";
/// We don't support too large changes.
if (values.amount_of_offset_change_value > 24 * 4)
values.amount_of_offset_change_value = 24 * 4;
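
The clamp stores the shift in coarse units; assuming `Values::OffsetChangeFactor` is 900 seconds (15 minutes), as DateLUTImpl's layout suggests, `24 * 4` units cap a single offset change at one day:

/// 96 quarter-hour units == 24 hours (assumes OffsetChangeFactor == 900 seconds).
static_assert(24 * 4 * 900 == 86400, "the clamp above caps offset changes at one day");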

View File

@@ -74,7 +74,6 @@ public:
using key_type = Key;
using mapped_type = typename Cell::mapped_type;
using value_type = typename Cell::value_type;
using cell_type = Cell;
class Reader final : private Cell::State
{
@@ -247,39 +246,6 @@ public:
}
}
/// Same, but return false if it's full.
bool ALWAYS_INLINE tryEmplace(Key x, iterator & it, bool & inserted)
{
Cell * res = findCell(x);
it = iteratorTo(res);
inserted = res == buf + m_size;
if (inserted)
{
if (res == buf + capacity)
return false;
new(res) Cell(x, *this);
++m_size;
}
return true;
}
/// Copy the cell from another hash table. It is assumed that there was no such key in the table yet.
void ALWAYS_INLINE insertUnique(const Cell * cell)
{
memcpy(&buf[m_size], cell, sizeof(*cell));
++m_size;
}
void ALWAYS_INLINE insertUnique(Key x)
{
new(&buf[m_size]) Cell(x, *this);
++m_size;
}
iterator ALWAYS_INLINE find(Key x) { return iteratorTo(findCell(x)); }
const_iterator ALWAYS_INLINE find(Key x) const { return iteratorTo(findCell(x)); }
@@ -381,36 +347,3 @@ template
>
using SmallSet = SmallTable<Key, HashTableCell<Key, HashUnused>, capacity>;
template
<
typename Key,
typename Cell,
size_t capacity
>
class SmallMapTable : public SmallTable<Key, Cell, capacity>
{
public:
using key_type = Key;
using mapped_type = typename Cell::mapped_type;
using value_type = typename Cell::value_type;
using cell_type = Cell;
mapped_type & ALWAYS_INLINE operator[](Key x)
{
typename SmallMapTable::iterator it;
bool inserted;
this->emplace(x, it, inserted);
new (&it->getMapped()) mapped_type();
return it->getMapped();
}
};
template
<
typename Key,
typename Mapped,
size_t capacity
>
using SmallMap = SmallMapTable<Key, HashMapCell<Key, Mapped, HashUnused>, capacity>;
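
The deleted `tryEmplace` differed from a plain `emplace` in one way: on a full fixed-capacity table it reports failure instead of growing. A toy standalone sketch of that contract (TinyTable is illustrative, not ClickHouse code):

#include <cstddef>

template <typename Key, size_t capacity>
struct TinyTable
{
    Key keys[capacity];
    size_t m_size = 0;

    /// Find x or insert it; return false (without inserting) when full,
    /// so the caller can spill to a larger structure instead.
    bool tryEmplace(Key x, size_t & pos, bool & inserted)
    {
        pos = 0;
        while (pos < m_size && !(keys[pos] == x))   /// linear scan over used slots
            ++pos;
        inserted = (pos == m_size);
        if (inserted)
        {
            if (m_size == capacity)
                return false;
            keys[m_size++] = x;
        }
        return true;
    }
};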

View File

@@ -355,8 +355,6 @@ private:
template <size_t PASS>
static inline void radixSortMSDInternal(Element * arr, size_t size, size_t limit)
{
// std::cerr << PASS << ", " << size << ", " << limit << "\n";
/// The beginning of every i-1-th bucket. 0th element will be equal to 1st.
/// Last element will point to array end.
std::unique_ptr<Element *[]> prev_buckets{new Element*[HISTOGRAM_SIZE + 1]};
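
`prev_buckets` holds per-bucket start pointers derived from a histogram of the current key byte. A hedged sketch of that counting-plus-prefix-sum step (assumes a 256-entry histogram, one MSD byte per pass):

#include <cstddef>
#include <cstdint>

/// Count each byte value, then turn counts into exclusive start offsets;
/// starts[b]..starts[b + 1] is bucket b, and starts[256] == size.
void computeBucketStarts(const uint8_t * key_bytes, size_t size, size_t starts[257])
{
    size_t histogram[256] = {};
    for (size_t i = 0; i < size; ++i)
        ++histogram[key_bytes[i]];

    starts[0] = 0;
    for (size_t b = 0; b < 256; ++b)
        starts[b + 1] = starts[b] + histogram[b];
}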

View File

@@ -511,8 +511,6 @@ void SerializationLowCardinality::serializeBinaryBulkWithMultipleStreams(
/// Insert used_keys into global dictionary and update sub_index.
auto indexes_with_overflow = global_dictionary->uniqueInsertRangeWithOverflow(*keys, 0, keys->size(),
settings.low_cardinality_max_dictionary_size);
// size_t max_size = settings.low_cardinality_max_dictionary_size + indexes_with_overflow.overflowed_keys->size();
// ColumnLowCardinality::Index(indexes_with_overflow.indexes->getPtr()).check(max_size);
if (global_dictionary->size() > settings.low_cardinality_max_dictionary_size)
throw Exception("Got dictionary with size " + toString(global_dictionary->size()) +
@@ -656,11 +654,6 @@ void SerializationLowCardinality::deserializeBinaryBulkWithMultipleStreams(
{
auto maps = mapIndexWithAdditionalKeys(*indexes_column, global_dictionary->size());
// ColumnLowCardinality::Index(maps.additional_keys_map->getPtr()).check(additional_keys->size());
// ColumnLowCardinality::Index(indexes_column->getPtr()).check(
// maps.dictionary_map->size() + maps.additional_keys_map->size());
auto used_keys = IColumn::mutate(global_dictionary->getNestedColumn()->index(*maps.dictionary_map, 0));
if (!maps.additional_keys_map->empty())

View File

@@ -78,9 +78,6 @@ private:
template <typename ... Params, typename... Args>
void deserializeImpl(IColumn & column, DeserializeFunctionPtr<Params...> func, Args &&... args) const;
// template <typename Creator>
// static MutableColumnUniquePtr createColumnUniqueImpl(const IDataType & keys_type, const Creator & creator);
};
}

View File

@@ -527,8 +527,6 @@ public:
throw Exception(ErrorCodes::CANNOT_IO_SUBMIT, "Cannot submit request for asynchronous IO on file {}", file_path);
}
// CurrentMetrics::Increment metric_increment_write{CurrentMetrics::Write};
io_event event;
while (io_getevents(aio_context.ctx, 1, 1, &event, nullptr) < 0)

View File

@@ -275,8 +275,6 @@ void registerDictionarySourceJDBC(DictionarySourceFactory & factory)
bool /* created_from_ddl */) -> DictionarySourcePtr {
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Dictionary source of type `jdbc` is disabled until consistent support for nullable fields.");
// BridgeHelperPtr bridge = std::make_shared<XDBCBridgeHelper<JDBCBridgeMixin>>(config, context.getSettings().http_receive_timeout, config.getString(config_prefix + ".connection_string"));
// return std::make_unique<XDBCDictionarySource>(dict_struct, config, config_prefix + ".jdbc", sample_block, context, bridge);
};
factory.registerSource("jdbc", create_table_source);
}

View File

@@ -74,7 +74,6 @@ void DiskWebServer::initialize(const String & uri_path) const
if (file_data.type == FileType::Directory)
{
directories_to_load.push_back(file_path);
// file_path = fs::path(file_path) / "";
}
file_path = file_path.substr(url.size());

View File

@@ -63,12 +63,10 @@ struct ToStartOfWeekImpl
static inline UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone)
{
return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
// return time_zone.toFirstDayNumOfWeek(t, week_mode);
}
static inline UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone)
{
return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode);
// return time_zone.toFirstDayNumOfWeek(t, week_mode);
}
static inline UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone)
{

View File

@@ -1091,8 +1091,6 @@ struct ConvertThroughParsing
static constexpr bool to_datetime64 = std::is_same_v<ToDataType, DataTypeDateTime64>;
// using ToFieldType = typename ToDataType::FieldType;
static bool isAllRead(ReadBuffer & in)
{
/// In case of FixedString, skip zero bytes at end.

View File

@@ -50,8 +50,6 @@ void CascadeWriteBuffer::nextImpl()
}
set(curr_buffer->position(), curr_buffer->buffer().end() - curr_buffer->position());
// std::cerr << "CascadeWriteBuffer a count=" << count() << " bytes=" << bytes << " offset=" << offset()
// << " bytes+size=" << bytes + buffer().size() << "\n";
}

View File

@@ -448,17 +448,7 @@ static ColumnWithTypeAndName executeActionForHeader(const ActionsDAG::Node * nod
{
case ActionsDAG::ActionType::FUNCTION:
{
// bool all_args_are_const = true;
// for (const auto & argument : arguments)
// if (typeid_cast<const ColumnConst *>(argument.column.get()) == nullptr)
// all_args_are_const = false;
res_column.column = node->function->execute(arguments, res_column.type, 0, true);
// if (!all_args_are_const)
// res_column.column = res_column.column->convertToFullColumnIfConst();
break;
}

View File

@@ -2100,7 +2100,6 @@ std::shared_ptr<NotJoinedBlocks> HashJoin::getNonJoinedBlocks(const Block & left
if (multiple_disjuncts)
{
/// ... calculate `left_columns_count` ...
// throw DB::Exception(ErrorCodes::NOT_IMPLEMENTED, "TODO");
size_t left_columns_count = left_sample_block.columns();
auto non_joined = std::make_unique<NotJoinedHash<true>>(*this, max_block_size);
return std::make_shared<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, table_join->leftToRightKeyRemap());

View File

@@ -280,7 +280,6 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(const ASTPtr & query_ptr_,
void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan)
{
// auto num_distinct_union = optimizeUnionList();
size_t num_plans = nested_interpreters.size();
const Settings & settings = context->getSettingsRef();

View File

@@ -244,28 +244,6 @@ private:
}
};
// class AssemblyPrinter
// {
// public:
// explicit AssemblyPrinter(llvm::TargetMachine &target_machine_)
// : target_machine(target_machine_)
// {
// }
// void print(llvm::Module & module)
// {
// llvm::legacy::PassManager pass_manager;
// target_machine.Options.MCOptions.AsmVerbose = true;
// if (target_machine.addPassesToEmitFile(pass_manager, llvm::errs(), nullptr, llvm::CodeGenFileType::CGFT_AssemblyFile))
// throw Exception(ErrorCodes::CANNOT_COMPILE_CODE, "MachineCode cannot be printed");
// pass_manager.run(module);
// }
// private:
// llvm::TargetMachine & target_machine;
// };
/** MemoryManager for module.
* Keep total allocated size during RuntimeDyld linker execution.
*/

View File

@@ -205,7 +205,6 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie
Stopwatch total_time_watch;
#endif
// auto & node = tasks.getNode(thread_num);
auto & context = tasks.getThreadContext(thread_num);
bool yield = false;

View File

@@ -74,15 +74,6 @@ protected:
if (chunk)
{
auto block = getPort(PortKind::Main).getHeader();
// const auto & info = chunk.getChunkInfo();
// const auto * agg_info = typeid_cast<const AggregatedChunkInfo *>(info.get());
// if (agg_info)
// {
// block.info.bucket_num = agg_info->bucket_num;
// block.info.is_overflows = agg_info->is_overflows;
// }
block.setColumns(chunk.detachColumns());
writer.write(block);
}

View File

@@ -126,7 +126,6 @@ void RemoteSource::onCancel()
{
was_query_canceled = true;
query_executor->cancel(&read_context);
// is_async_state = false;
}
void RemoteSource::onUpdatePorts()
@@ -135,7 +134,6 @@ void RemoteSource::onUpdatePorts()
{
was_query_canceled = true;
query_executor->finish(&read_context);
// is_async_state = false;
}
}

View File

@@ -323,7 +323,6 @@ QueryPipelineBuilderPtr QueryPipelineBuilder::mergePipelines(
left->pipe.processors.emplace_back(transform);
left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end());
// left->pipe.holder = std::move(right->pipe.holder);
left->pipe.header = left->pipe.output_ports.front()->getHeader();
left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams);
return left;

View File

@@ -66,7 +66,6 @@ HiveMetastoreClient::HiveTableMetadataPtr HiveMetastoreClient::getTableMetadata(
};
tryCallHiveClient(client_call);
// bool update_cache = shouldUpdateTableMetadata(db_name, table_name, partitions);
String cache_key = getCacheKey(db_name, table_name);
HiveTableMetadataPtr metadata = table_metadata_cache.get(cache_key);

View File

@@ -603,22 +603,6 @@ String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize(
return *minimum_size_column;
}
// String IMergeTreeDataPart::getFullPath() const
// {
// if (relative_path.empty())
// throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
// return fs::path(storage.getFullPathOnDisk(volume->getDisk())) / (parent_part ? parent_part->relative_path : "") / relative_path / "";
// }
// String IMergeTreeDataPart::getRelativePath() const
// {
// if (relative_path.empty())
// throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR);
// return fs::path(storage.relative_data_path) / (parent_part ? parent_part->relative_path : "") / relative_path / "";
// }
void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checksums, bool check_consistency)
{
assertOnDisk();

View File

@@ -30,9 +30,6 @@ public:
}
protected:
// using SerializationState = ISerialization::SerializeBinaryBulkStatePtr;
// ISerialization::OutputStreamGetter createStreamGetter(const String & name, WrittenOffsetColumns & offset_columns);
/// Remove all columns marked expired in data_part. Also, clears checksums
/// and columns array. Return set of removed files names.

View File

@@ -596,7 +596,6 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
const auto & projections = global_ctx->metadata_snapshot->getProjections();
// tasks_for_projections.reserve(projections.size());
for (const auto & projection : projections)
{

View File

@@ -1584,8 +1584,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry)
return true; /// NOTE Deletion from `virtual_parts` is not done, but it is only necessary for merge.
}
// bool do_fetch = false;
switch (entry.type)
{
case LogEntry::ATTACH_PART:
@@ -1593,7 +1591,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry)
[[fallthrough]];
case LogEntry::GET_PART:
return executeFetch(entry);
// do_fetch = true;
case LogEntry::MERGE_PARTS:
throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge has to be executed by another function");
case LogEntry::MUTATE_PART:
@@ -1609,8 +1606,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry)
default:
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected log entry type: {}", static_cast<int>(entry.type));
}
// return true;
}