Mirror of https://github.com/ClickHouse/ClickHouse.git
dbms: add ComplexKeyCacheDictionary, not yet complete [#METR-17328]
Requires a way of deallocating keys in the memory Arena (a new type of arena, actually); MySQLDictionarySource is not yet capable of requesting complex keys.
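For reference, the selective load added here composes a query of roughly the following shape (table, key and attribute names are hypothetical; see composeLoadKeysQuery, composeKeyTupleDefinition and composeKeyTuple in the diff below):

    SELECT k1, k2, attr FROM test.dict WHERE (k1, k2) IN ((1, 'abc'), (2, 'def'));

The key tuple definition "(k1, k2)" is built once in the constructor, while each value tuple is serialized per requested row with serializeTextQuoted.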
commit 0fd1fc9cdc
parent 7798c45b5b
@ -40,7 +40,8 @@ public:
|
||||
max_connections, host, port, db, user, password,
|
||||
"ClickHouseDictionarySource")
|
||||
},
|
||||
load_all_query{composeLoadAllQuery()}
|
||||
load_all_query{composeLoadAllQuery()},
|
||||
key_tuple_definition{composeKeyTupleDefinition()}
|
||||
{}
|
||||
|
||||
/// copy-constructor is provided in order to support cloneability
|
||||
@ -69,11 +70,13 @@ public:
|
||||
|
||||
BlockInputStreamPtr loadIds(const std::vector<std::uint64_t> & ids) override
|
||||
{
|
||||
const auto query = composeLoadIdsQuery(ids);
|
||||
return createStreamForSelectiveLoad(composeLoadIdsQuery(ids));
|
||||
}
|
||||
|
||||
if (is_local)
|
||||
return executeQuery(query, context, true).in;
|
||||
return new RemoteBlockInputStream{pool.get(), query, nullptr};
|
||||
BlockInputStreamPtr loadKeys(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows) override
|
||||
{
|
||||
return createStreamForSelectiveLoad(composeLoadKeysQuery(key_columns, requested_rows));
|
||||
}
|
||||
|
||||
bool isModified() const override { return true; }
|
||||
@ -183,8 +186,8 @@ private:
|
||||
|
||||
std::string composeLoadIdsQuery(const std::vector<std::uint64_t> ids)
|
||||
{
|
||||
if (dict_struct.key)
|
||||
throw Exception{"Complex key not supported", ErrorCodes::UNSUPPORTED_METHOD};
|
||||
if (!dict_struct.id)
|
||||
throw Exception{"Simple key required for method", ErrorCodes::UNSUPPORTED_METHOD};
|
||||
|
||||
std::string query;
|
||||
|
||||
@ -248,6 +251,115 @@ private:
|
||||
return query;
|
||||
}
|
||||
|
||||
std::string composeLoadKeysQuery(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows)
|
||||
{
|
||||
if (!dict_struct.key)
|
||||
throw Exception{"Composite key required for method", ErrorCodes::UNSUPPORTED_METHOD};
|
||||
|
||||
std::string query;
|
||||
|
||||
{
|
||||
WriteBufferFromString out{query};
|
||||
writeString("SELECT ", out);
|
||||
|
||||
auto first = true;
|
||||
for (const auto & key_or_attribute : boost::join(*dict_struct.key, dict_struct.attributes))
|
||||
{
|
||||
if (!first)
|
||||
writeString(", ", out);
|
||||
|
||||
first = false;
|
||||
|
||||
if (!key_or_attribute.expression.empty())
|
||||
{
|
||||
writeParenthesisedString(key_or_attribute.expression, out);
|
||||
writeString(" AS ", out);
|
||||
}
|
||||
|
||||
writeProbablyBackQuotedString(key_or_attribute.name, out);
|
||||
}
|
||||
|
||||
writeString(" FROM ", out);
|
||||
if (!db.empty())
|
||||
{
|
||||
writeProbablyBackQuotedString(db, out);
|
||||
writeChar('.', out);
|
||||
}
|
||||
writeProbablyBackQuotedString(table, out);
|
||||
|
||||
writeString(" WHERE ", out);
|
||||
|
||||
if (!where.empty())
|
||||
{
|
||||
writeString(where, out);
|
||||
writeString(" AND ", out);
|
||||
}
|
||||
|
||||
writeString(key_tuple_definition, out);
|
||||
writeString(" IN (", out);
|
||||
|
||||
first = true;
|
||||
for (const auto row : requested_rows)
|
||||
{
|
||||
if (!first)
|
||||
writeString(", ", out);
|
||||
|
||||
first = false;
|
||||
composeKeyTuple(key_columns, row, out);
|
||||
}
|
||||
|
||||
writeString(");", out);
|
||||
}
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
std::string composeKeyTupleDefinition() const
|
||||
{
|
||||
std::string result{"("};
|
||||
|
||||
auto first = true;
|
||||
for (const auto & key : *dict_struct.key)
|
||||
{
|
||||
if (!first)
|
||||
result += ", ";
|
||||
|
||||
first = false;
|
||||
result += key.name;
|
||||
}
|
||||
|
||||
result += ")";
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void composeKeyTuple(const ConstColumnPlainPtrs & key_columns, const std::size_t row, WriteBuffer & out) const
|
||||
{
|
||||
writeString("(", out);
|
||||
|
||||
const auto keys_size = key_columns.size();
|
||||
auto first = true;
|
||||
for (const auto i : ext::range(0, keys_size))
|
||||
{
|
||||
if (!first)
|
||||
writeString(", ", out);
|
||||
|
||||
first = false;
|
||||
const auto & value = (*key_columns[i])[row];
|
||||
(*dict_struct.key)[i].type->serializeTextQuoted(value, out);
|
||||
}
|
||||
|
||||
writeString(")", out);
|
||||
}
|
||||
|
||||
BlockInputStreamPtr createStreamForSelectiveLoad(const std::string query)
|
||||
{
|
||||
if (is_local)
|
||||
return executeQuery(query, context, true).in;
|
||||
return new RemoteBlockInputStream{pool.get(), query, nullptr};
|
||||
}
|
||||
|
||||
const DictionaryStructure dict_struct;
|
||||
const std::string host;
|
||||
const UInt16 port;
|
||||
@ -261,6 +373,7 @@ private:
|
||||
const bool is_local;
|
||||
std::unique_ptr<ConnectionPool> pool;
|
||||
const std::string load_all_query;
|
||||
const std::string key_tuple_definition;
|
||||
};
|
||||
|
||||
}
|
||||
|
dbms/include/DB/Dictionaries/ComplexKeyCacheDictionary.h (new file, 844 lines)
@ -0,0 +1,844 @@
#pragma once

#include <DB/Dictionaries/IDictionary.h>
#include <DB/Dictionaries/IDictionarySource.h>
#include <DB/Dictionaries/DictionaryStructure.h>
#include <DB/Common/HashTable/HashMap.h>
#include <DB/Columns/ColumnString.h>
#include <DB/Core/StringRef.h>
#include <ext/scope_guard.hpp>
#include <ext/bit_cast.hpp>
#include <ext/range.hpp>
#include <ext/map.hpp>
#include <Poco/RWLock.h>
#include <cmath>
#include <atomic>
#include <chrono>
#include <vector>
#include <map>
#include <tuple>
#include <DB/DataStreams/NullBlockInputStream.h>


namespace DB
{

class ComplexKeyCacheDictionary final : public IDictionaryBase
|
||||
{
|
||||
public:
|
||||
ComplexKeyCacheDictionary(const std::string & name, const DictionaryStructure & dict_struct,
|
||||
DictionarySourcePtr source_ptr, const DictionaryLifetime dict_lifetime,
|
||||
const std::size_t size)
|
||||
: name{name}, dict_struct(dict_struct), source_ptr{std::move(source_ptr)}, dict_lifetime(dict_lifetime),
|
||||
key_description{createKeyDescription(dict_struct)}, size{round_up_to_power_of_two(size)}, cells{this->size}
|
||||
{
|
||||
if (!this->source_ptr->supportsSelectiveLoad())
|
||||
throw Exception{
|
||||
name + ": source cannot be used with ComplexKeyCacheDictionary",
|
||||
ErrorCodes::UNSUPPORTED_METHOD
|
||||
};
|
||||
|
||||
createAttributes();
|
||||
}
|
||||
|
||||
ComplexKeyCacheDictionary(const ComplexKeyCacheDictionary & other)
|
||||
: ComplexKeyCacheDictionary{other.name, other.dict_struct, other.source_ptr->clone(), other.dict_lifetime, other.size}
|
||||
{}
|
||||
|
||||
std::string getKeyDescription() const { return key_description; };
|
||||
|
||||
std::exception_ptr getCreationException() const override { return {}; }
|
||||
|
||||
std::string getName() const override { return name; }
|
||||
|
||||
std::string getTypeName() const override { return "ComplexKeyCache"; }
|
||||
|
||||
std::size_t getBytesAllocated() const override { return bytes_allocated; }
|
||||
|
||||
std::size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
|
||||
|
||||
double getHitRate() const override
|
||||
{
|
||||
return static_cast<double>(hit_count.load(std::memory_order_acquire)) /
|
||||
query_count.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
std::size_t getElementCount() const override { return element_count.load(std::memory_order_relaxed); }
|
||||
|
||||
double getLoadFactor() const override
|
||||
{
|
||||
return static_cast<double>(element_count.load(std::memory_order_relaxed)) / size;
|
||||
}
|
||||
|
||||
bool isCached() const override { return true; }
|
||||
|
||||
DictionaryPtr clone() const override { return std::make_unique<ComplexKeyCacheDictionary>(*this); }
|
||||
|
||||
const IDictionarySource * getSource() const override { return source_ptr.get(); }
|
||||
|
||||
const DictionaryLifetime & getLifetime() const override { return dict_lifetime; }
|
||||
|
||||
const DictionaryStructure & getStructure() const override { return dict_struct; }
|
||||
|
||||
std::chrono::time_point<std::chrono::system_clock> getCreationTime() const override
|
||||
{
|
||||
return creation_time;
|
||||
}
|
||||
|
||||
bool isInjective(const std::string & attribute_name) const override
|
||||
{
|
||||
return dict_struct.attributes[&getAttribute(attribute_name) - attributes.data()].injective;
|
||||
}
|
||||
|
||||
#define DECLARE_MULTIPLE_GETTER(TYPE)\
|
||||
void get##TYPE(\
|
||||
const std::string & attribute_name, const ConstColumnPlainPtrs & key_columns, const DataTypes & key_types,\
|
||||
PODArray<TYPE> & out) const\
|
||||
{\
|
||||
validateKeyTypes(key_types);\
|
||||
\
|
||||
auto & attribute = getAttribute(attribute_name);\
|
||||
if (attribute.type != AttributeUnderlyingType::TYPE)\
|
||||
throw Exception{\
|
||||
name + ": type mismatch: attribute " + attribute_name + " has type " + toString(attribute.type),\
|
||||
ErrorCodes::TYPE_MISMATCH\
|
||||
};\
|
||||
\
|
||||
getItems<TYPE>(attribute, key_columns, out);\
|
||||
}
|
||||
DECLARE_MULTIPLE_GETTER(UInt8)
|
||||
DECLARE_MULTIPLE_GETTER(UInt16)
|
||||
DECLARE_MULTIPLE_GETTER(UInt32)
|
||||
DECLARE_MULTIPLE_GETTER(UInt64)
|
||||
DECLARE_MULTIPLE_GETTER(Int8)
|
||||
DECLARE_MULTIPLE_GETTER(Int16)
|
||||
DECLARE_MULTIPLE_GETTER(Int32)
|
||||
DECLARE_MULTIPLE_GETTER(Int64)
|
||||
DECLARE_MULTIPLE_GETTER(Float32)
|
||||
DECLARE_MULTIPLE_GETTER(Float64)
|
||||
#undef DECLARE_MULTIPLE_GETTER
|
||||
void getString(
|
||||
const std::string & attribute_name, const ConstColumnPlainPtrs & key_columns, const DataTypes & key_types,
|
||||
ColumnString * out) const
|
||||
{
|
||||
validateKeyTypes(key_types);
|
||||
|
||||
auto & attribute = getAttribute(attribute_name);
|
||||
if (attribute.type != AttributeUnderlyingType::String)
|
||||
throw Exception{
|
||||
name + ": type mismatch: attribute " + attribute_name + " has type " + toString(attribute.type),
|
||||
ErrorCodes::TYPE_MISMATCH
|
||||
};
|
||||
|
||||
getItems(attribute, key_columns, out);
|
||||
}
|
||||
|
||||
#define DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(TYPE)\
|
||||
void get##TYPE(\
|
||||
const std::string & attribute_name, const ConstColumnPlainPtrs & key_columns, const DataTypes & key_types,\
|
||||
const PODArray<TYPE> & def, PODArray<TYPE> & out) const\
|
||||
{\
|
||||
validateKeyTypes(key_types);\
|
||||
\
|
||||
auto & attribute = getAttribute(attribute_name);\
|
||||
if (attribute.type != AttributeUnderlyingType::TYPE)\
|
||||
throw Exception{\
|
||||
name + ": type mismatch: attribute " + attribute_name + " has type " + toString(attribute.type),\
|
||||
ErrorCodes::TYPE_MISMATCH\
|
||||
};\
|
||||
\
|
||||
getItems<TYPE>(attribute, key_columns, out, &def);\
|
||||
}
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(UInt8)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(UInt16)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(UInt32)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(UInt64)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Int8)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Int16)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Int32)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Int64)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Float32)
|
||||
DECLARE_MULTIPLE_GETTER_WITH_DEFAULT(Float64)
|
||||
#undef DECLARE_MULTIPLE_GETTER_WITH_DEFAULT
|
||||
void getString(
|
||||
const std::string & attribute_name, const ConstColumnPlainPtrs & key_columns, const DataTypes & key_types,
|
||||
const ColumnString * const def, ColumnString * const out) const
|
||||
{
|
||||
validateKeyTypes(key_types);
|
||||
|
||||
auto & attribute = getAttribute(attribute_name);
|
||||
if (attribute.type != AttributeUnderlyingType::String)
|
||||
throw Exception{
|
||||
name + ": type mismatch: attribute " + attribute_name + " has type " + toString(attribute.type),
|
||||
ErrorCodes::TYPE_MISMATCH
|
||||
};
|
||||
|
||||
getItems(attribute, key_columns, out, def);
|
||||
}
|
||||
|
||||
private:
|
||||
template <typename Value> using MapType = HashMapWithSavedHash<StringRef, Value, StringRefHash>;
|
||||
template <typename Value> using ContainerType = Value[];
|
||||
template <typename Value> using ContainerPtrType = std::unique_ptr<ContainerType<Value>>;
|
||||
|
||||
struct cell_metadata_t final
|
||||
{
|
||||
using time_point_t = std::chrono::system_clock::time_point;
|
||||
using time_point_rep_t = time_point_t::rep;
|
||||
using time_point_urep_t = std::make_unsigned_t<time_point_rep_t>;
|
||||
|
||||
static constexpr std::uint64_t EXPIRES_AT_MASK = std::numeric_limits<time_point_rep_t>::max();
|
||||
static constexpr std::uint64_t IS_DEFAULT_MASK = ~EXPIRES_AT_MASK;
|
||||
|
||||
StringRef key;
|
||||
decltype(StringRefHash{}(key)) hash;
|
||||
/// Stores both expiration time and `is_default` flag in the most significant bit
|
||||
time_point_urep_t data;
|
||||
|
||||
/// Sets expiration time, resets `is_default` flag to false
|
||||
time_point_t expiresAt() const { return ext::safe_bit_cast<time_point_t>(data & EXPIRES_AT_MASK); }
|
||||
void setExpiresAt(const time_point_t & t) { data = ext::safe_bit_cast<time_point_urep_t>(t); }
|
||||
|
||||
bool isDefault() const { return (data & IS_DEFAULT_MASK) == IS_DEFAULT_MASK; }
|
||||
void setDefault() { data |= IS_DEFAULT_MASK; }
|
||||
};
|
||||
|
||||
struct attribute_t final
|
||||
{
|
||||
AttributeUnderlyingType type;
|
||||
std::tuple<
|
||||
UInt8, UInt16, UInt32, UInt64,
|
||||
Int8, Int16, Int32, Int64,
|
||||
Float32, Float64,
|
||||
String> null_values;
|
||||
std::tuple<
|
||||
ContainerPtrType<UInt8>, ContainerPtrType<UInt16>, ContainerPtrType<UInt32>, ContainerPtrType<UInt64>,
|
||||
ContainerPtrType<Int8>, ContainerPtrType<Int16>, ContainerPtrType<Int32>, ContainerPtrType<Int64>,
|
||||
ContainerPtrType<Float32>, ContainerPtrType<Float64>,
|
||||
ContainerPtrType<StringRef>> arrays;
|
||||
};
|
||||
|
||||
void createAttributes()
|
||||
{
|
||||
const auto size = dict_struct.attributes.size();
|
||||
attributes.reserve(size);
|
||||
|
||||
bytes_allocated += size * sizeof(cell_metadata_t);
|
||||
bytes_allocated += size * sizeof(attributes.front());
|
||||
|
||||
for (const auto & attribute : dict_struct.attributes)
|
||||
{
|
||||
attribute_index_by_name.emplace(attribute.name, attributes.size());
|
||||
attributes.push_back(createAttributeWithType(attribute.underlying_type, attribute.null_value));
|
||||
|
||||
if (attribute.hierarchical)
|
||||
throw Exception{
|
||||
name + ": hierarchical attributes not supported for dictionary of type " + getTypeName(),
|
||||
ErrorCodes::TYPE_MISMATCH
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
attribute_t createAttributeWithType(const AttributeUnderlyingType type, const Field & null_value)
|
||||
{
|
||||
attribute_t attr{type};
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case AttributeUnderlyingType::UInt8:
|
||||
std::get<UInt8>(attr.null_values) = null_value.get<UInt64>();
|
||||
std::get<ContainerPtrType<UInt8>>(attr.arrays) = std::make_unique<ContainerType<UInt8>>(size);
|
||||
bytes_allocated += size * sizeof(UInt8);
|
||||
break;
|
||||
case AttributeUnderlyingType::UInt16:
|
||||
std::get<UInt16>(attr.null_values) = null_value.get<UInt64>();
|
||||
std::get<ContainerPtrType<UInt16>>(attr.arrays) = std::make_unique<ContainerType<UInt16>>(size);
|
||||
bytes_allocated += size * sizeof(UInt16);
|
||||
break;
|
||||
case AttributeUnderlyingType::UInt32:
|
||||
std::get<UInt32>(attr.null_values) = null_value.get<UInt64>();
|
||||
std::get<ContainerPtrType<UInt32>>(attr.arrays) = std::make_unique<ContainerType<UInt32>>(size);
|
||||
bytes_allocated += size * sizeof(UInt32);
|
||||
break;
|
||||
case AttributeUnderlyingType::UInt64:
|
||||
std::get<UInt64>(attr.null_values) = null_value.get<UInt64>();
|
||||
std::get<ContainerPtrType<UInt64>>(attr.arrays) = std::make_unique<ContainerType<UInt64>>(size);
|
||||
bytes_allocated += size * sizeof(UInt64);
|
||||
break;
|
||||
case AttributeUnderlyingType::Int8:
|
||||
std::get<Int8>(attr.null_values) = null_value.get<Int64>();
|
||||
std::get<ContainerPtrType<Int8>>(attr.arrays) = std::make_unique<ContainerType<Int8>>(size);
|
||||
bytes_allocated += size * sizeof(Int8);
|
||||
break;
|
||||
case AttributeUnderlyingType::Int16:
|
||||
std::get<Int16>(attr.null_values) = null_value.get<Int64>();
|
||||
std::get<ContainerPtrType<Int16>>(attr.arrays) = std::make_unique<ContainerType<Int16>>(size);
|
||||
bytes_allocated += size * sizeof(Int16);
|
||||
break;
|
||||
case AttributeUnderlyingType::Int32:
|
||||
std::get<Int32>(attr.null_values) = null_value.get<Int64>();
|
||||
std::get<ContainerPtrType<Int32>>(attr.arrays) = std::make_unique<ContainerType<Int32>>(size);
|
||||
bytes_allocated += size * sizeof(Int32);
|
||||
break;
|
||||
case AttributeUnderlyingType::Int64:
|
||||
std::get<Int64>(attr.null_values) = null_value.get<Int64>();
|
||||
std::get<ContainerPtrType<Int64>>(attr.arrays) = std::make_unique<ContainerType<Int64>>(size);
|
||||
bytes_allocated += size * sizeof(Int64);
|
||||
break;
|
||||
case AttributeUnderlyingType::Float32:
|
||||
std::get<Float32>(attr.null_values) = null_value.get<Float64>();
|
||||
std::get<ContainerPtrType<Float32>>(attr.arrays) = std::make_unique<ContainerType<Float32>>(size);
|
||||
bytes_allocated += size * sizeof(Float32);
|
||||
break;
|
||||
case AttributeUnderlyingType::Float64:
|
||||
std::get<Float64>(attr.null_values) = null_value.get<Float64>();
|
||||
std::get<ContainerPtrType<Float64>>(attr.arrays) = std::make_unique<ContainerType<Float64>>(size);
|
||||
bytes_allocated += size * sizeof(Float64);
|
||||
break;
|
||||
case AttributeUnderlyingType::String:
|
||||
std::get<String>(attr.null_values) = null_value.get<String>();
|
||||
std::get<ContainerPtrType<StringRef>>(attr.arrays) = std::make_unique<ContainerType<StringRef>>(size);
|
||||
bytes_allocated += size * sizeof(StringRef);
|
||||
break;
|
||||
}
|
||||
|
||||
return attr;
|
||||
}
|
||||
|
||||
static std::string createKeyDescription(const DictionaryStructure & dict_struct)
|
||||
{
|
||||
std::ostringstream out;
|
||||
|
||||
out << '(';
|
||||
|
||||
auto first = true;
|
||||
for (const auto & key : *dict_struct.key)
|
||||
{
|
||||
if (!first)
|
||||
out << ", ";
|
||||
|
||||
first = false;
|
||||
|
||||
out << key.type->getName();
|
||||
}
|
||||
|
||||
out << ')';
|
||||
|
||||
return out.str();
|
||||
}
|
||||
|
||||
void validateKeyTypes(const DataTypes & key_types) const
|
||||
{
|
||||
if (key_types.size() != dict_struct.key->size())
|
||||
throw Exception{
|
||||
"Key structure does not match, expected " + key_description,
|
||||
ErrorCodes::TYPE_MISMATCH
|
||||
};
|
||||
|
||||
for (const auto i : ext::range(0, key_types.size()))
|
||||
{
|
||||
const auto & expected_type = (*dict_struct.key)[i].type->getName();
|
||||
const auto & actual_type = key_types[i]->getName();
|
||||
|
||||
if (expected_type != actual_type)
|
||||
throw Exception{
|
||||
"Key type at position " + std::to_string(i) + " does not match, expected " + expected_type +
|
||||
", found " + actual_type,
|
||||
ErrorCodes::TYPE_MISMATCH
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void getItems(
|
||||
attribute_t & attribute, const ConstColumnPlainPtrs & key_columns, PODArray<T> & out,
|
||||
const PODArray<T> * const def = nullptr) const
|
||||
{
|
||||
/// Mapping: <key> -> { all indices `i` of `key_columns` such that `key_columns[i]` = <key> }
|
||||
MapType<std::vector<std::size_t>> outdated_keys;
|
||||
auto & attribute_array = std::get<ContainerPtrType<T>>(attribute.arrays);
|
||||
|
||||
const auto rows = key_columns.front()->size();
|
||||
const auto keys_size = dict_struct.key->size();
|
||||
StringRefs keys(keys_size);
|
||||
Arena temporary_keys_pool;
|
||||
PODArray<StringRef> keys_array(rows);
|
||||
|
||||
{
|
||||
const Poco::ScopedReadRWLock read_lock{rw_lock};
|
||||
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
/// fetch up-to-date values, decide which ones require update
|
||||
for (const auto row : ext::range(0, rows))
|
||||
{
|
||||
const auto key = placeKeysInPool(row, key_columns, keys, temporary_keys_pool);
|
||||
keys_array[row] = key;
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto cell_idx = hash & (size - 1);
|
||||
const auto & cell = cells[cell_idx];
|
||||
|
||||
/** cell should be updated if either:
|
||||
* 1. keys (or hash) do not match,
|
||||
* 2. cell has expired,
|
||||
* 3. explicit defaults were specified and cell was set default. */
|
||||
if (cell.hash != hash || cell.key != key || cell.expiresAt() < now || (def && cell.isDefault()))
|
||||
outdated_keys[key].push_back(row);
|
||||
else
|
||||
out[row] = attribute_array[cell_idx];
|
||||
}
|
||||
}
|
||||
|
||||
query_count.fetch_add(rows, std::memory_order_relaxed);
|
||||
hit_count.fetch_add(rows - outdated_keys.size(), std::memory_order_release);
|
||||
|
||||
if (outdated_keys.empty())
|
||||
return;
|
||||
|
||||
std::vector<std::size_t> required_rows(outdated_keys.size());
|
||||
std::transform(std::begin(outdated_keys), std::end(outdated_keys), std::begin(required_rows),
|
||||
[] (auto & pair) { return pair.second.front(); });
|
||||
|
||||
/// request new values;
|
||||
update(key_columns, keys_array, required_rows, [&] (const auto key, const auto cell_idx) {
|
||||
const auto attribute_value = attribute_array[cell_idx];
|
||||
|
||||
/// set missing values to out
|
||||
for (const auto out_idx : outdated_keys[key])
|
||||
out[out_idx] = attribute_value;
|
||||
}, [&] (const auto key, const auto cell_idx) {
|
||||
auto & attribute_value = attribute_array[cell_idx];
|
||||
|
||||
if (def)
|
||||
attribute_value = (*def)[outdated_keys[key].front()];
|
||||
|
||||
/// set missing values to out
|
||||
for (const auto out_idx : outdated_keys[key])
|
||||
out[out_idx] = attribute_value;
|
||||
});
|
||||
}
|
||||
|
||||
void getItems(
|
||||
attribute_t & attribute, const ConstColumnPlainPtrs & key_columns, ColumnString * out,
|
||||
const ColumnString * const def = nullptr) const
|
||||
{
|
||||
const auto rows = key_columns.front()->size();
|
||||
/// save on some allocations
|
||||
out->getOffsets().reserve(rows);
|
||||
|
||||
const auto keys_size = dict_struct.key->size();
|
||||
StringRefs keys(keys_size);
|
||||
Arena temporary_keys_pool;
|
||||
|
||||
auto & attribute_array = std::get<ContainerPtrType<StringRef>>(attribute.arrays);
|
||||
|
||||
auto found_outdated_values = false;
|
||||
|
||||
/// perform optimistic version, fallback to pessimistic if failed
|
||||
{
|
||||
const Poco::ScopedReadRWLock read_lock{rw_lock};
|
||||
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
/// fetch up-to-date values, discard on fail
|
||||
for (const auto row : ext::range(0, rows))
|
||||
{
|
||||
const auto key = placeKeysInPool(row, key_columns, keys, temporary_keys_pool);
|
||||
SCOPE_EXIT(temporary_keys_pool.rollback(key.size));
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto cell_idx = hash & (size - 1);
|
||||
const auto & cell = cells[cell_idx];
|
||||
|
||||
if (cell.hash != hash || cell.key != key || cell.expiresAt() < now || (def && cell.isDefault()))
|
||||
{
|
||||
found_outdated_values = true;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto string_ref = attribute_array[cell_idx];
|
||||
out->insertData(string_ref.data, string_ref.size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// optimistic code completed successfully
|
||||
if (!found_outdated_values)
|
||||
{
|
||||
query_count.fetch_add(rows, std::memory_order_relaxed);
|
||||
hit_count.fetch_add(rows, std::memory_order_release);
|
||||
return;
|
||||
}
|
||||
|
||||
/// now onto the pessimistic one, discard possible partial results from the optimistic path
|
||||
out->getChars().resize_assume_reserved(0);
|
||||
out->getOffsets().resize_assume_reserved(0);
|
||||
|
||||
/// Mapping: <key> -> { all indices `i` of `key_columns` such that `key_columns[i]` = <key> }
|
||||
MapType<std::vector<std::size_t>> outdated_keys;
|
||||
/// we are going to store every string separately
|
||||
MapType<String> map;
|
||||
PODArray<StringRef> keys_array(rows);
|
||||
|
||||
std::size_t total_length = 0;
|
||||
{
|
||||
const Poco::ScopedReadRWLock read_lock{rw_lock};
|
||||
|
||||
const auto now = std::chrono::system_clock::now();
|
||||
for (const auto row : ext::range(0, rows))
|
||||
{
|
||||
const auto key = placeKeysInPool(row, key_columns, keys, temporary_keys_pool);
|
||||
keys_array[row] = key;
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto cell_idx = hash & (size - 1);
|
||||
const auto & cell = cells[cell_idx];
|
||||
|
||||
if (cell.hash != hash || cell.key != key || cell.expiresAt() < now || (def && cell.isDefault()))
|
||||
outdated_keys[key].push_back(row);
|
||||
else
|
||||
{
|
||||
const auto string_ref = attribute_array[cell_idx];
|
||||
map[key] = String{string_ref};
|
||||
total_length += string_ref.size + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
query_count.fetch_add(rows, std::memory_order_relaxed);
|
||||
hit_count.fetch_add(rows - outdated_keys.size(), std::memory_order_release);
|
||||
|
||||
/// request new values
|
||||
if (!outdated_keys.empty())
|
||||
{
|
||||
std::vector<std::size_t> required_rows(outdated_keys.size());
|
||||
std::transform(std::begin(outdated_keys), std::end(outdated_keys), std::begin(required_rows),
|
||||
[] (auto & pair) { return pair.second.front(); });
|
||||
|
||||
update(key_columns, keys_array, required_rows, [&] (const auto key, const auto cell_idx) {
|
||||
const auto attribute_value = attribute_array[cell_idx];
|
||||
|
||||
map[key] = String{attribute_value};
|
||||
total_length += (attribute_value.size + 1) * outdated_keys[key].size();
|
||||
}, [&] (const auto key, const auto cell_idx) {
|
||||
auto attribute_value = def ? def->getDataAt(outdated_keys[key].front()) : attribute_array[cell_idx];
|
||||
map[key] = String{attribute_value};
|
||||
total_length += (attribute_value.size + 1) * outdated_keys[key].size();
|
||||
});
|
||||
}
|
||||
|
||||
out->getChars().reserve(total_length);
|
||||
|
||||
const auto & null_value = std::get<String>(attribute.null_values);
|
||||
|
||||
for (const auto key : keys_array)
|
||||
{
|
||||
const auto it = map.find(key);
|
||||
/// @note check seems redundant, null_values are explicitly stored in the `map`
|
||||
const auto & string = it != map.end() ? it->second : null_value;
|
||||
out->insertData(string.data(), string.size());
|
||||
}
|
||||
}
|
||||
|
||||
template <typename PresentKeyHandler, typename AbsentKeyHandler>
|
||||
void update(
|
||||
const ConstColumnPlainPtrs & in_key_columns, const PODArray<StringRef> & in_keys,
|
||||
const std::vector<std::size_t> & in_requested_rows, PresentKeyHandler && on_cell_updated,
|
||||
AbsentKeyHandler && on_key_not_found) const
|
||||
{
|
||||
auto stream = source_ptr->loadKeys(in_key_columns, in_requested_rows);
|
||||
stream->readPrefix();
|
||||
|
||||
MapType<UInt8> remaining_keys{in_requested_rows.size()};
|
||||
for (const auto row : in_requested_rows)
|
||||
remaining_keys.insert({ in_keys[row], 0 });
|
||||
|
||||
std::uniform_int_distribution<std::uint64_t> distribution{
|
||||
dict_lifetime.min_sec,
|
||||
dict_lifetime.max_sec
|
||||
};
|
||||
|
||||
const Poco::ScopedWriteRWLock write_lock{rw_lock};
|
||||
|
||||
const auto keys_size = dict_struct.key->size();
|
||||
StringRefs keys(keys_size);
|
||||
|
||||
const auto attributes_size = attributes.size();
|
||||
|
||||
while (const auto block = stream->read())
|
||||
{
|
||||
/// cache column pointers
|
||||
const auto key_columns = ext::map<ConstColumnPlainPtrs>(ext::range(0, keys_size),
|
||||
[&] (const std::size_t attribute_idx) {
|
||||
return block.getByPosition(attribute_idx).column.get();
|
||||
});
|
||||
|
||||
const auto attribute_columns = ext::map<ConstColumnPlainPtrs>(ext::range(0, attributes_size),
|
||||
[&] (const std::size_t attribute_idx) {
|
||||
return block.getByPosition(keys_size + attribute_idx).column.get();
|
||||
});
|
||||
|
||||
const auto rows = block.rowsInFirstColumn();
|
||||
|
||||
for (const auto row : ext::range(0, rows))
|
||||
{
|
||||
auto key = placeKeysInPool(row, key_columns, keys, keys_pool);
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto cell_idx = hash & (size - 1);
|
||||
auto & cell = cells[cell_idx];
|
||||
|
||||
for (const auto attribute_idx : ext::range(0, attributes.size()))
|
||||
{
|
||||
const auto & attribute_column = *attribute_columns[attribute_idx];
|
||||
auto & attribute = attributes[attribute_idx];
|
||||
|
||||
setAttributeValue(attribute, cell_idx, attribute_column[row]);
|
||||
}
|
||||
|
||||
/// if cell id is zero and zero does not map to this cell, then the cell is unused
|
||||
if (cell.key == StringRef{} && cell_idx != zero_cell_idx)
|
||||
element_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
/// handle memory allocated for old key
|
||||
if (key == cell.key)
|
||||
{
|
||||
/// new key is same as old key, rollback memory allocated for the new key
|
||||
keys_pool.rollback(key.size);
|
||||
key = cell.key;
|
||||
}
|
||||
else
|
||||
{
|
||||
/// new key is different from the old one
|
||||
/// @todo need a pool capable of deallocations
|
||||
/*keys_pool.dealloc(cell.key.data, cell.key.size);*/
|
||||
cell.key = key;
|
||||
}
|
||||
|
||||
cell.hash = hash;
|
||||
|
||||
if (dict_lifetime.min_sec != 0 && dict_lifetime.max_sec != 0)
|
||||
cell.setExpiresAt(std::chrono::system_clock::now() + std::chrono::seconds{distribution(rnd_engine)});
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
/// inform caller
|
||||
on_cell_updated(key, cell_idx);
|
||||
/// mark corresponding id as found
|
||||
remaining_keys[key] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
stream->readSuffix();
|
||||
|
||||
/// Check which ids have not been found and require setting null_value
|
||||
for (const auto key_found_pair : remaining_keys)
|
||||
{
|
||||
if (key_found_pair.second)
|
||||
continue;
|
||||
|
||||
auto key = key_found_pair.first;
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto cell_idx = hash & (size - 1);
|
||||
auto & cell = cells[cell_idx];
|
||||
|
||||
/// Set null_value for each attribute
|
||||
for (auto & attribute : attributes)
|
||||
setDefaultAttributeValue(attribute, cell_idx);
|
||||
|
||||
/// Check if cell had not been occupied before and increment element counter if it hadn't
|
||||
if (cell.key == StringRef{} && cell_idx != zero_cell_idx)
|
||||
element_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
if (key == cell.key)
|
||||
key = cell.key;
|
||||
else
|
||||
{
|
||||
/// @todo need a pool capable of deallocations
|
||||
/*keys_pool.dealloc(cell.key.data, cell.key.size);*/
|
||||
/// copy key from temporary pool to `keys_pool`
|
||||
key = copyKeyToPool(key, keys_pool);
|
||||
cell.key = key;
|
||||
}
|
||||
|
||||
cell.hash = hash;
|
||||
|
||||
if (dict_lifetime.min_sec != 0 && dict_lifetime.max_sec != 0)
|
||||
cell.setExpiresAt(std::chrono::system_clock::now() + std::chrono::seconds{distribution(rnd_engine)});
|
||||
else
|
||||
cell.setExpiresAt(std::chrono::time_point<std::chrono::system_clock>::max());
|
||||
|
||||
cell.setDefault();
|
||||
|
||||
/// inform caller that the cell has not been found
|
||||
on_key_not_found(key, cell_idx);
|
||||
}
|
||||
}
|
||||
|
||||
std::uint64_t getCellIdx(const StringRef key) const
|
||||
{
|
||||
const auto hash = StringRefHash{}(key);
|
||||
const auto idx = hash & (size - 1);
|
||||
return idx;
|
||||
}
|
||||
|
||||
void setDefaultAttributeValue(attribute_t & attribute, const std::size_t idx) const
|
||||
{
|
||||
switch (attribute.type)
|
||||
{
|
||||
case AttributeUnderlyingType::UInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = std::get<UInt8>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::UInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = std::get<UInt16>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::UInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = std::get<UInt32>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::UInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = std::get<UInt64>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Int8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = std::get<Int8>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Int16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = std::get<Int16>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Int32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = std::get<Int32>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Int64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = std::get<Int64>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Float32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = std::get<Float32>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::Float64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = std::get<Float64>(attribute.null_values); break;
|
||||
case AttributeUnderlyingType::String:
|
||||
{
|
||||
const auto & null_value_ref = std::get<String>(attribute.null_values);
|
||||
auto & string_ref = std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
|
||||
if (string_ref.data == null_value_ref.data())
|
||||
return;
|
||||
|
||||
if (string_ref.size != 0)
|
||||
bytes_allocated -= string_ref.size + 1;
|
||||
const std::unique_ptr<const char[]> deleter{string_ref.data};
|
||||
|
||||
string_ref = StringRef{null_value_ref};
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void setAttributeValue(attribute_t & attribute, const std::size_t idx, const Field & value) const
|
||||
{
|
||||
switch (attribute.type)
|
||||
{
|
||||
case AttributeUnderlyingType::UInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::Int8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Float32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::Float64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::String:
|
||||
{
|
||||
const auto & string = value.get<String>();
|
||||
auto & string_ref = std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
|
||||
const auto & null_value_ref = std::get<String>(attribute.null_values);
|
||||
if (string_ref.data != null_value_ref.data())
|
||||
{
|
||||
if (string_ref.size != 0)
|
||||
bytes_allocated -= string_ref.size + 1;
|
||||
/// avoid explicit delete, let unique_ptr handle it
|
||||
const std::unique_ptr<const char[]> deleter{string_ref.data};
|
||||
}
|
||||
|
||||
const auto size = string.size();
|
||||
if (size != 0)
|
||||
{
|
||||
auto string_ptr = std::make_unique<char[]>(size + 1);
|
||||
std::copy(string.data(), string.data() + size + 1, string_ptr.get());
|
||||
string_ref = StringRef{string_ptr.release(), size};
|
||||
bytes_allocated += size + 1;
|
||||
}
|
||||
else
|
||||
string_ref = {};
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
attribute_t & getAttribute(const std::string & attribute_name) const
|
||||
{
|
||||
const auto it = attribute_index_by_name.find(attribute_name);
|
||||
if (it == std::end(attribute_index_by_name))
|
||||
throw Exception{
|
||||
name + ": no such attribute '" + attribute_name + "'",
|
||||
ErrorCodes::BAD_ARGUMENTS
|
||||
};
|
||||
|
||||
return attributes[it->second];
|
||||
}
|
||||
|
||||
static std::size_t round_up_to_power_of_two(std::size_t n)
|
||||
{
|
||||
--n;
|
||||
n |= n >> 1;
|
||||
n |= n >> 2;
|
||||
n |= n >> 4;
|
||||
n |= n >> 8;
|
||||
n |= n >> 16;
|
||||
n |= n >> 32;
|
||||
++n;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static std::uint64_t getSeed()
|
||||
{
|
||||
timespec ts;
|
||||
clock_gettime(CLOCK_MONOTONIC, &ts);
|
||||
return ts.tv_nsec ^ getpid();
|
||||
}
|
||||
|
||||
static StringRef placeKeysInPool(
|
||||
const std::size_t row, const ConstColumnPlainPtrs & key_columns, StringRefs & keys, Arena & pool)
|
||||
{
|
||||
const auto keys_size = key_columns.size();
|
||||
size_t sum_keys_size{};
|
||||
for (const auto i : ext::range(0, keys_size))
|
||||
{
|
||||
keys[i] = key_columns[i]->getDataAtWithTerminatingZero(row);
|
||||
sum_keys_size += keys[i].size;
|
||||
}
|
||||
|
||||
const auto res = pool.alloc(sum_keys_size);
|
||||
auto place = res;
|
||||
|
||||
for (size_t j = 0; j < keys_size; ++j)
|
||||
{
|
||||
memcpy(place, keys[j].data, keys[j].size);
|
||||
place += keys[j].size;
|
||||
}
|
||||
|
||||
return { res, sum_keys_size };
|
||||
}
|
||||
|
||||
static StringRef copyKeyToPool(const StringRef key, Arena & pool)
|
||||
{
|
||||
const auto res = pool.alloc(key.size);
|
||||
memcpy(res, key.data, key.size);
|
||||
|
||||
return { res, key.size };
|
||||
}
|
||||
|
||||
const std::string name;
|
||||
const DictionaryStructure dict_struct;
|
||||
const DictionarySourcePtr source_ptr;
|
||||
const DictionaryLifetime dict_lifetime;
|
||||
const std::string key_description;
|
||||
|
||||
mutable Poco::RWLock rw_lock;
|
||||
const std::size_t size;
|
||||
const std::uint64_t zero_cell_idx{getCellIdx(StringRef{})};
|
||||
std::map<std::string, std::size_t> attribute_index_by_name;
|
||||
mutable std::vector<attribute_t> attributes;
|
||||
mutable std::vector<cell_metadata_t> cells;
|
||||
mutable Arena keys_pool;
|
||||
|
||||
mutable std::mt19937_64 rnd_engine{getSeed()};
|
||||
|
||||
mutable std::size_t bytes_allocated = 0;
|
||||
mutable std::atomic<std::size_t> element_count{0};
|
||||
mutable std::atomic<std::size_t> hit_count{0};
|
||||
mutable std::atomic<std::size_t> query_count{0};
|
||||
|
||||
const std::chrono::time_point<std::chrono::system_clock> creation_time = std::chrono::system_clock::now();
|
||||
};
|
||||
|
||||
}
|
@ -41,10 +41,13 @@ public:
|
||||
|
||||
BlockInputStreamPtr loadIds(const std::vector<std::uint64_t> & ids) override
|
||||
{
|
||||
throw Exception{
|
||||
"Method unsupported",
|
||||
ErrorCodes::NOT_IMPLEMENTED
|
||||
};
|
||||
throw Exception{"Method unsupported", ErrorCodes::NOT_IMPLEMENTED};
|
||||
}
|
||||
|
||||
BlockInputStreamPtr loadKeys(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows) override
|
||||
{
|
||||
throw Exception{"Method unsupported", ErrorCodes::NOT_IMPLEMENTED};
|
||||
}
|
||||
|
||||
bool isModified() const override { return getLastModification() > last_modification; }
|
||||
|
@ -27,6 +27,11 @@ public:
|
||||
/// returns an input stream with the data for a collection of identifiers
|
||||
virtual BlockInputStreamPtr loadIds(const std::vector<std::uint64_t> & ids) = 0;
|
||||
|
||||
/** returns an input stream with the data for a collection of composite keys.
|
||||
* `requested_rows` contains indices of all rows containing unique keys. */
|
||||
virtual BlockInputStreamPtr loadKeys(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows) = 0;
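	/// Example (hypothetical values): for key columns (k1, k2) with rows {0: (1, 'a'), 1: (1, 'a'), 2: (2, 'b')},
	/// requested_rows would be {0, 2}; the returned stream yields the key columns followed by the attribute columns.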
|
||||
|
||||
/// indicates whether the source has been modified since last load* operation
|
||||
virtual bool isModified() const = 0;
|
||||
|
||||
|
@ -111,6 +111,12 @@ public:
|
||||
};
|
||||
}
|
||||
|
||||
BlockInputStreamPtr loadKeys(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows) override
|
||||
{
|
||||
throw Exception{"Method unsupported", ErrorCodes::NOT_IMPLEMENTED};
|
||||
}
|
||||
|
||||
/// @todo: for MongoDB, modification date can somehow be determined from the `_id` object field
|
||||
bool isModified() const override { return false; }
|
||||
|
||||
|
@ -58,6 +58,15 @@ public:
|
||||
return new MySQLBlockInputStream{pool.Get(), query, sample_block, max_block_size};
|
||||
}
|
||||
|
||||
BlockInputStreamPtr loadKeys(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows) override
|
||||
{
|
||||
/// Do not log here and do not update the modification time, since the query can be large and is issued frequently.
|
||||
|
||||
const auto query = composeLoadKeysQuery(key_columns, requested_rows);
|
||||
return new MySQLBlockInputStream{pool.Get(), query, sample_block, max_block_size};
|
||||
}
|
||||
|
||||
bool isModified() const override
|
||||
{
|
||||
if (dont_check_update_time)
|
||||
@ -309,6 +318,13 @@ private:
|
||||
return query;
|
||||
}
|
||||
|
||||
std::string composeLoadKeysQuery(
|
||||
const ConstColumnPlainPtrs & key_columns, const std::vector<std::size_t> & requested_rows)
|
||||
{
|
||||
/// @todo implement
|
||||
return {};
|
||||
}
|
||||
|
||||
const DictionaryStructure dict_struct;
|
||||
const std::string db;
|
||||
const std::string table;
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <DB/Dictionaries/HashedDictionary.h>
|
||||
#include <DB/Dictionaries/CacheDictionary.h>
|
||||
#include <DB/Dictionaries/ComplexKeyHashedDictionary.h>
|
||||
#include <DB/Dictionaries/ComplexKeyCacheDictionary.h>
|
||||
#include <DB/Dictionaries/RangeHashedDictionary.h>
|
||||
|
||||
#include <ext/range.hpp>
|
||||
@ -820,7 +821,8 @@ private:
|
||||
if (!executeDispatch<FlatDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchRange<RangeHashedDictionary>(block, arguments, result, dict_ptr))
|
||||
throw Exception{
|
||||
"Unsupported dictionary type " + dict_ptr->getTypeName(),
|
||||
@ -880,10 +882,11 @@ private:
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename DictionaryType>
|
||||
bool executeDispatchComplex(
|
||||
Block & block, const ColumnNumbers & arguments, const size_t result, const IDictionaryBase * const dictionary)
|
||||
{
|
||||
const auto dict = typeid_cast<const ComplexKeyHashedDictionary *>(dictionary);
|
||||
const auto dict = typeid_cast<const DictionaryType *>(dictionary);
|
||||
if (!dict)
|
||||
return false;
|
||||
|
||||
@ -1360,7 +1363,8 @@ private:
|
||||
if (!executeDispatch<FlatDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict_ptr) &&
|
||||
!executeDispatchRange<RangeHashedDictionary>(block, arguments, result, dict_ptr))
|
||||
throw Exception{
|
||||
"Unsupported dictionary type " + dict_ptr->getTypeName(),
|
||||
@ -1422,10 +1426,11 @@ private:
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename DictionaryType>
|
||||
bool executeDispatchComplex(
|
||||
Block & block, const ColumnNumbers & arguments, const size_t result, const IDictionaryBase * const dictionary)
|
||||
{
|
||||
const auto dict = typeid_cast<const ComplexKeyHashedDictionary *>(dictionary);
|
||||
const auto dict = typeid_cast<const DictionaryType *>(dictionary);
|
||||
if (!dict)
|
||||
return false;
|
||||
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <DB/Dictionaries/CacheDictionary.h>
|
||||
#include <DB/Dictionaries/RangeHashedDictionary.h>
|
||||
#include <DB/Dictionaries/ComplexKeyHashedDictionary.h>
|
||||
#include <DB/Dictionaries/ComplexKeyCacheDictionary.h>
|
||||
#include <DB/Dictionaries/DictionaryStructure.h>
|
||||
#include <memory>
|
||||
|
||||
@ -62,6 +63,29 @@ DictionaryPtr DictionaryFactory::create(const std::string & name, Poco::Util::Ab
|
||||
|
||||
return std::make_unique<ComplexKeyHashedDictionary>(name, dict_struct, std::move(source_ptr), dict_lifetime, require_nonempty);
|
||||
}
|
||||
else if ("complex_key_cache" == layout_type)
|
||||
{
|
||||
if (!dict_struct.key)
|
||||
throw Exception{
|
||||
"'key' is required for dictionary of layout 'complex_key_hashed'",
|
||||
ErrorCodes::BAD_ARGUMENTS
|
||||
};
|
||||
|
||||
const auto size = config.getInt(layout_prefix + ".complex_key_cache.size_in_cells");
|
||||
if (size == 0)
|
||||
throw Exception{
|
||||
name + ": dictionary of layout 'cache' cannot have 0 cells",
|
||||
ErrorCodes::TOO_SMALL_BUFFER_SIZE
|
||||
};
|
||||
|
||||
if (require_nonempty)
|
||||
throw Exception{
|
||||
name + ": dictionary of layout 'cache' cannot have 'require_nonempty' attribute set",
|
||||
ErrorCodes::BAD_ARGUMENTS
|
||||
};
|
||||
|
||||
return std::make_unique<ComplexKeyCacheDictionary>(name, dict_struct, std::move(source_ptr), dict_lifetime, size);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dict_struct.key)