Remove unused buggy code

Raúl Marín 2024-09-19 18:17:04 +02:00
parent b4504f20bf
commit 7521fd2dcb
6 changed files with 0 additions and 144 deletions

View File

@@ -447,51 +447,6 @@ public:
        }
    }

    void readAndMerge(DB::ReadBuffer & rb)
    {
        UInt8 rhs_skip_degree = 0;
        DB::readBinaryLittleEndian(rhs_skip_degree, rb);

        if (rhs_skip_degree > skip_degree)
        {
            skip_degree = rhs_skip_degree;
            rehash();
        }

        size_t rhs_size = 0;
        DB::readVarUInt(rhs_size, rb);

        if (rhs_size > UNIQUES_HASH_MAX_SIZE)
            throw Poco::Exception("Cannot read UniquesHashSet: too large size_degree.");

        if ((1ULL << size_degree) < rhs_size)
        {
            UInt8 new_size_degree = std::max(UNIQUES_HASH_SET_INITIAL_SIZE_DEGREE, static_cast<int>(log2(rhs_size - 1)) + 2);
            resize(new_size_degree);
        }

        if (rhs_size <= 1)
        {
            for (size_t i = 0; i < rhs_size; ++i)
            {
                HashValue x = 0;
                DB::readBinaryLittleEndian(x, rb);
                insertHash(x);
            }
        }
        else
        {
            auto hs = std::make_unique<HashValue[]>(rhs_size);
            rb.readStrict(reinterpret_cast<char *>(hs.get()), rhs_size * sizeof(HashValue));

            for (size_t i = 0; i < rhs_size; ++i)
            {
                DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
                insertHash(hs[i]);
            }
        }
    }

    static void skip(DB::ReadBuffer & rb)
    {
        size_t size = 0;
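
The method removed above followed a common size-prefixed deserialize-and-merge shape: read a size, validate it against a hard cap, bulk-read the payload, then insert element by element. A minimal self-contained sketch of that shape, using std::unordered_set and a raw byte buffer in place of ClickHouse's ReadBuffer (all names here are illustrative, not from the codebase):

```cpp
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <unordered_set>
#include <vector>

using HashValue = uint32_t;

constexpr size_t MAX_SIZE = 1ULL << 20; // hard cap, analogous to UNIQUES_HASH_MAX_SIZE

void read_and_merge(std::unordered_set<HashValue> & set, const std::vector<char> & buf)
{
    size_t pos = 0;
    auto read_raw = [&](void * dst, size_t n)
    {
        if (pos + n > buf.size())
            throw std::runtime_error("unexpected end of buffer");
        std::memcpy(dst, buf.data() + pos, n);
        pos += n;
    };

    uint64_t rhs_size = 0;
    read_raw(&rhs_size, sizeof(rhs_size)); // fixed-width size for simplicity (the original used a varint)

    // Validate before allocating anything, so corrupted input cannot trigger a huge allocation.
    if (rhs_size > MAX_SIZE)
        throw std::runtime_error("serialized set is suspiciously large");

    set.reserve(set.size() + rhs_size); // grow once up front, like the resize() in the original

    std::vector<HashValue> hashes(rhs_size);
    read_raw(hashes.data(), rhs_size * sizeof(HashValue)); // bulk read, then insert one by one
    for (HashValue h : hashes)
        set.insert(h);
}
```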

View File

@@ -177,48 +177,6 @@ public:
        }
    }

    void readAndMerge(DB::ReadBuffer & in)
    {
        auto container_type = getContainerType();

        /// If readAndMerge is called with an empty state, just deserialize
        /// the state that is passed in as a parameter.
        if ((container_type == details::ContainerType::SMALL) && small.empty())
        {
            read(in);
            return;
        }

        UInt8 v;
        readBinary(v, in);
        auto rhs_container_type = static_cast<details::ContainerType>(v);

        auto max_container_type = details::max(container_type, rhs_container_type);

        if (container_type != max_container_type)
        {
            if (max_container_type == details::ContainerType::MEDIUM)
                toMedium();
            else if (max_container_type == details::ContainerType::LARGE)
                toLarge();
        }

        if (rhs_container_type == details::ContainerType::SMALL)
        {
            typename Small::Reader reader(in);
            while (reader.next())
                insert(reader.get());
        }
        else if (rhs_container_type == details::ContainerType::MEDIUM)
        {
            typename Medium::Reader reader(in);
            while (reader.next())
                insert(reader.get());
        }
        else if (rhs_container_type == details::ContainerType::LARGE)
            getContainer<Large>().readAndMerge(in);
    }

    void write(DB::WriteBuffer & out) const
    {
        auto container_type = getContainerType();
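
The key step in the removed method is promoting the local state to the larger of the two container types before inserting the rhs elements, so the union always fits the representation. A minimal sketch of that promotion logic (ContainerType values and the Estimator struct are illustrative stand-ins, not the codebase's types):

```cpp
#include <algorithm>
#include <cstdint>

// Container kinds ordered by capacity, so "max" picks the bigger representation.
enum class ContainerType : uint8_t { SMALL = 1, MEDIUM = 2, LARGE = 3 };

ContainerType max_container(ContainerType a, ContainerType b)
{
    return static_cast<ContainerType>(std::max(static_cast<uint8_t>(a), static_cast<uint8_t>(b)));
}

struct Estimator
{
    ContainerType type = ContainerType::SMALL;

    void to_medium() { type = ContainerType::MEDIUM; /* re-insert elements into the bigger container */ }
    void to_large()  { type = ContainerType::LARGE;  /* re-insert elements into the bigger container */ }

    // Before merging an rhs state, promote *this so it can hold the union.
    void promote_for(ContainerType rhs_type)
    {
        auto target = max_container(type, rhs_type);
        if (target == type)
            return;
        if (target == ContainerType::MEDIUM)
            to_medium();
        else if (target == ContainerType::LARGE)
            to_large();
    }
};
```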

View File

@@ -16,11 +16,4 @@ public:
            if (Base::buf[i].isZero(*this) && !rhs.buf[i].isZero(*this))
                new (&Base::buf[i]) Cell(rhs.buf[i]);
    }

    /// NOTE: Currently this method isn't used. When it is, the ReadBuffer should
    /// contain the Key explicitly.
    // void readAndMerge(DB::ReadBuffer & rb)
    // {
    // }
};
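
For context, the surviving merge loop above copies each occupied rhs cell into the corresponding empty slot with placement new. A stripped-down illustration of that cell-copy pattern (Cell and the flat buffers are simplified stand-ins for the real hash-table internals):

```cpp
#include <cstddef>
#include <new>

// Simplified flat hash-table cell: "zero" means the slot is unoccupied.
struct Cell
{
    int value = 0;
    bool occupied = false;
    bool isZero() const { return !occupied; }
};

// Copy every occupied src cell into the matching empty dst slot,
// copy-constructing in place so no assignment operator is required.
void merge_cells(Cell * dst, const Cell * src, size_t n)
{
    for (size_t i = 0; i < n; ++i)
        if (dst[i].isZero() && !src[i].isZero())
            new (&dst[i]) Cell(src[i]);
}
```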

View File

@@ -55,26 +55,6 @@ public:
            if (!rhs.buf[i].isZero(*this))
                this->insert(rhs.buf[i].getValue());
    }

    void readAndMerge(DB::ReadBuffer & rb)
    {
        Cell::State::read(rb);

        size_t new_size = 0;
        DB::readVarUInt(new_size, rb);
        if (new_size > 100'000'000'000)
            throw DB::Exception(DB::ErrorCodes::TOO_LARGE_ARRAY_SIZE, "The size of serialized hash table is suspiciously large: {}", new_size);

        this->resize(new_size);
        for (size_t i = 0; i < new_size; ++i)
        {
            Cell x;
            x.read(rb);
            this->insert(x.getValue());
        }
    }
};
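
The removed method read a varint size (DB::readVarUInt), rejected implausibly large values before allocating, and then read and inserted cells one by one. A self-contained sketch of that shape, including a minimal LEB128-style varint decoder standing in for readVarUInt (names and types here are illustrative):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <set>
#include <stdexcept>
#include <vector>

// Minimal LEB128-style varint decoder: 7 payload bits per byte,
// high bit set on every byte except the last.
uint64_t read_var_uint(const std::vector<char> & buf, size_t & pos)
{
    uint64_t value = 0;
    for (int shift = 0; shift < 64; shift += 7)
    {
        if (pos >= buf.size())
            throw std::runtime_error("unexpected end of buffer");
        uint8_t byte = static_cast<uint8_t>(buf[pos++]);
        value |= static_cast<uint64_t>(byte & 0x7F) << shift;
        if ((byte & 0x80) == 0)
            return value;
    }
    throw std::runtime_error("varint too long");
}

void read_and_merge(std::set<int32_t> & set, const std::vector<char> & buf)
{
    size_t pos = 0;
    uint64_t new_size = read_var_uint(buf, pos);

    // Sanity bound first, so corrupted or hostile input cannot force a huge allocation.
    if (new_size > 100'000'000'000ULL)
        throw std::runtime_error("serialized hash table is suspiciously large");

    for (uint64_t i = 0; i < new_size; ++i)
    {
        if (pos + sizeof(int32_t) > buf.size())
            throw std::runtime_error("unexpected end of buffer");
        int32_t x;
        std::memcpy(&x, buf.data() + pos, sizeof(x));
        pos += sizeof(x);
        set.insert(x);
    }
}
```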

View File

@@ -353,18 +353,6 @@ public:
        }
    }

    void readAndMerge(DB::ReadBuffer & in)
    {
        typename RankStore::Reader reader(in);
        while (reader.next())
        {
            const auto & data = reader.get();
            update(data.first, data.second);
        }

        in.ignore(sizeof(DenominatorCalculatorType) + sizeof(ZerosCounterType));
    }

    static void skip(DB::ReadBuffer & in)
    {
        in.ignore(sizeof(RankStore) + sizeof(DenominatorCalculatorType) + sizeof(ZerosCounterType));
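
Merging two HyperLogLog states reduces to an element-wise maximum over the rank registers, which is what the removed update(data.first, data.second) loop performed entry by entry. A minimal sketch with a plain array of registers standing in for the packed RankStore (bucket count and types are illustrative):

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

// Illustrative HLL register merge: for each bucket, keep the larger rank.
constexpr size_t BUCKET_COUNT = 1 << 12;
using Registers = std::array<uint8_t, BUCKET_COUNT>;

void merge(Registers & lhs, const Registers & rhs)
{
    for (size_t i = 0; i < BUCKET_COUNT; ++i)
        lhs[i] = std::max(lhs[i], rhs[i]); // same effect as update(bucket, rank) per entry
}
```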

View File

@@ -113,24 +113,6 @@ public:
        small.read(in);
    }

    void readAndMerge(DB::ReadBuffer & in)
    {
        bool is_rhs_large;
        readBinary(is_rhs_large, in);

        if (!isLarge() && is_rhs_large)
            toLarge();

        if (!is_rhs_large)
        {
            typename Small::Reader reader(in);
            while (reader.next())
                insert(reader.get());
        }
        else
            large->readAndMerge(in);
    }

    void write(DB::WriteBuffer & out) const
    {
        writeBinary(isLarge(), out);
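
The removed method implemented the usual two-mode merge: read the rhs "is large" flag, promote the local state if needed, then insert the rhs elements. A self-contained sketch of that shape, with std::set and std::unordered_set standing in for the exact small set and the HyperLogLog sketch (all names illustrative):

```cpp
#include <cstdint>
#include <memory>
#include <set>
#include <unordered_set>

struct TwoModeSet
{
    std::set<uint64_t> small;                             // exact, used while cardinality is low
    std::unique_ptr<std::unordered_set<uint64_t>> large;  // stand-in for the HLL sketch

    bool isLarge() const { return large != nullptr; }

    // Promote: move every element of the small set into the large representation.
    void toLarge()
    {
        large = std::make_unique<std::unordered_set<uint64_t>>(small.begin(), small.end());
        small.clear();
    }

    void insert(uint64_t x)
    {
        if (isLarge())
            large->insert(x);
        else
            small.insert(x);
    }

    // Merge: if the rhs has already grown large, promote ourselves first, then insert.
    void merge(const TwoModeSet & rhs)
    {
        if (!isLarge() && rhs.isLarge())
            toLarge();
        if (rhs.isLarge())
            for (uint64_t x : *rhs.large)
                insert(x);
        else
            for (uint64_t x : rhs.small)
                insert(x);
    }
};
```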