Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 10c5518988 (parent 64e8f09649): Fix tests
@@ -177,13 +177,9 @@ static inline void insertDefaultValuesIntoColumns(
         const auto & default_value_provider = fetch_request.defaultValueProviderAtIndex(column_index);
 
         if (fetch_request.shouldFillResultColumnWithIndex(column_index))
-        {
-            std::cerr << "insertDefaultValuesIntoColumns" << default_value_provider.getDefaultValue(row_index).dump() << std::endl;
-
             column->insert(default_value_provider.getDefaultValue(row_index));
-        }
     }
 }
 
 /// Deserialize column value and insert it in columns.
 /// Skip unnecessary columns that were not requested from deserialization.
@@ -970,8 +970,12 @@ private:
 
             const auto * it = index.find(key);
 
-            if (it)
+            if (!it)
+            {
+                ++result.not_found_keys_size;
+                continue;
+            }
 
             const auto & cell = it->getMapped();
 
             bool has_deadline = cellHasDeadline(cell);
@@ -1029,9 +1033,6 @@ private:
                     }
                 }
-            }
-            else
-                ++result.not_found_keys_size;
         }
 
         /// Sort blocks by offset before start async io requests
         std::sort(blocks_to_request.begin(), blocks_to_request.end());
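The two hunks above flatten the lookup path: instead of nesting the whole fetch body inside `if (it)` and counting misses in a trailing `else`, keys that are absent from the index are now counted and skipped with an early `continue`. A minimal standalone sketch of that pattern follows, with `std::unordered_map` standing in for the cache index and invented `Cell` / `FetchResult` / `fetchKeys` names; none of this is the real ClickHouse interface.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <vector>

    struct Cell
    {
        int64_t value = 0;
    };

    using CacheIndex = std::unordered_map<uint64_t, Cell>;

    struct FetchResult
    {
        std::size_t not_found_keys_size = 0;
        std::vector<int64_t> values;
    };

    /// Keys missing from the index are counted and skipped up front, so the rest
    /// of the loop body never nests inside an `if (found)` branch.
    FetchResult fetchKeys(const CacheIndex & index, const std::vector<uint64_t> & keys)
    {
        FetchResult result;

        for (const auto key : keys)
        {
            const auto it = index.find(key);

            if (it == index.end())
            {
                ++result.not_found_keys_size;
                continue;
            }

            result.values.push_back(it->second.value);
        }

        return result;
    }

    int main()
    {
        const CacheIndex index{{1, Cell{10}}, {3, Cell{30}}};
        const auto result = fetchKeys(index, {1, 2, 3, 4});

        std::cout << "found " << result.values.size()
                  << ", not found " << result.not_found_keys_size << '\n';
    }

Counting misses at the top of the loop is what lets the second hunk drop the trailing `else` branch and its extra closing brace.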
@@ -1119,9 +1120,10 @@ private:
             Cell cell;
 
             setCellDeadline(cell, now);
-            cell.index = {0, 0};
             cell.in_memory_partition_index = 0;
+            cell.state = Cell::default_value;
+            cell.index = {0, 0};
 
 
             if constexpr (dictionary_key_type == DictionaryKeyType::complex)
             {
@@ -1157,9 +1159,20 @@ private:
 
     void insertCell(SSDCacheKeyType & ssd_cache_key, Cell & cell)
    {
-        SSDCacheIndex cache_index;
-
-        size_t loop_count = 0;
+        /** InsertCell has following flow
+
+            1. We try to write key into current memory buffer, if write succeeded then return.
+            2. Then if we does not write key into current memory buffer, we try to flush current memory buffer
+            to disk.
+
+            If flush succeeded then reset current memory buffer, write key into it and return.
+            If flush failed that means that current partition on disk is full, need to allocate new partition
+            or start reusing old ones.
+
+            Retry to step 1.
+        */
+
+        SSDCacheIndex cache_index {0, 0};
 
         while (true)
         {
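The new block comment spells out the insert algorithm: try the in-memory write buffer first, flush it to disk when the write fails, and allocate (or start reusing) a disk partition when the flush fails, then retry. A rough sketch of that retry loop under toy types; `MemoryBuffer`, `DiskFile`, `insertKey` and their members are invented for illustration and do not match the real SSDCacheDictionaryStorage interfaces.

    #include <cstddef>
    #include <deque>
    #include <string>
    #include <vector>

    /// Toy in-memory write buffer with a fixed key capacity.
    struct MemoryBuffer
    {
        std::size_t capacity = 4;
        std::vector<std::string> keys;

        bool tryWrite(const std::string & key)
        {
            if (keys.size() >= capacity)
                return false;

            keys.push_back(key);
            return true;
        }

        void reset() { keys.clear(); }
    };

    /// Toy "disk file" that accepts a limited number of flushed buffers per partition.
    struct DiskFile
    {
        std::size_t blocks_per_partition = 2;
        std::size_t partitions = 1;
        std::deque<std::vector<std::string>> blocks;

        bool tryFlush(const std::vector<std::string> & block)
        {
            if (blocks.size() >= partitions * blocks_per_partition)
                return false;

            blocks.push_back(block);
            return true;
        }

        void allocateNextPartition() { ++partitions; }
    };

    /// Mirrors the flow from the comment above:
    /// 1. try to write the key into the current memory buffer; on success, return;
    /// 2. otherwise flush the buffer to disk, reset it and retry;
    /// 3. if the flush fails (the current partition is full), allocate a new partition and retry.
    void insertKey(MemoryBuffer & buffer, DiskFile & file, const std::string & key)
    {
        while (true)
        {
            if (buffer.tryWrite(key))
                return;

            if (file.tryFlush(buffer.keys))
            {
                buffer.reset();
                continue;
            }

            file.allocateNextPartition();
        }
    }

    int main()
    {
        MemoryBuffer buffer;
        DiskFile file;

        for (int i = 0; i < 20; ++i)
            insertKey(buffer, file, "key_" + std::to_string(i));
    }

The sketch only grows new partitions; the real storage can also wrap around and reuse old ones, which is what the block-range check in the next hunk protects against.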
@@ -1203,7 +1216,8 @@ private:
 
                     /// Check if key in index is key from old partition blocks
                     if (old_key_cell.isOnDisk() &&
-                        old_key_block >= block_index_in_file_before_write && old_key_block <= file_read_end_block_index)
+                        old_key_block >= block_index_in_file_before_write &&
+                        old_key_block < file_read_end_block_index)
                         index.erase(old_key);
                 }
             }
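The tightened comparison makes the overwritten block range half-open: a key is evicted from the index only if its block lies in [block_index_in_file_before_write, file_read_end_block_index), so the block at the end index itself is no longer erased. A tiny self-contained illustration of the half-open check; the helper name is made up and only the comparison mirrors the diff.

    #include <cassert>
    #include <cstddef>

    /// True if `block` lies in the half-open range of blocks that were just rewritten
    /// on disk, i.e. any index entry still pointing at it must be erased.
    bool isInRewrittenRange(std::size_t block, std::size_t first_rewritten_block, std::size_t end_block)
    {
        return block >= first_rewritten_block && block < end_block;
    }

    int main()
    {
        // Blocks 8 and 9 were rewritten; block 10 is the first block left untouched.
        assert(isInRewrittenRange(8, 8, 10));
        assert(isInRewrittenRange(9, 8, 10));
        assert(!isInRewrittenRange(10, 8, 10));   // excluded with `<`, wrongly evicted with the old `<=`
    }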
@@ -1230,8 +1244,12 @@ private:
                 auto key_to_update = keys_to_update[i];
                 auto * it = index.find(key_to_update);
 
-                /// If lru cache does not contain old keys or there were duplicated keys in memory buffer partition
-                if (!it || updated_keys.contains(it->getKey()))
+                /// If there are not key to update or key to update not in memory
+                if (!it || it->getMapped().state != Cell::in_memory)
                     continue;
 
+                /// If there were duplicated keys in memory buffer partition
+                if (updated_keys.contains(it->getKey()))
+                    continue;
+
                 updated_keys.insert(key_to_update);
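The single combined condition is split in two: first skip keys that are missing from the index or whose cell is no longer in memory, then separately skip keys already updated from this memory buffer partition. A small standalone sketch of that loop, with plain standard containers in place of the dictionary's index and `updated_keys` set; the `Cell` / `CellState` types are illustrative, and `std::unordered_set::contains` needs C++20, matching the `contains` call in the diff.

    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    enum class CellState { in_memory, on_disk, default_value };

    struct Cell
    {
        CellState state = CellState::default_value;
    };

    int main()
    {
        std::unordered_map<uint64_t, Cell> index{
            {1, Cell{CellState::in_memory}},
            {2, Cell{CellState::on_disk}},
            {3, Cell{CellState::in_memory}}};

        // Key 3 appears twice: only its first occurrence should be processed.
        const std::vector<uint64_t> keys_to_update{1, 2, 3, 3, 4};
        std::unordered_set<uint64_t> updated_keys;

        for (const auto key : keys_to_update)
        {
            const auto it = index.find(key);

            /// Skip keys that are missing or whose cell is not in the memory buffer.
            if (it == index.end() || it->second.state != CellState::in_memory)
                continue;

            /// Skip duplicates that were already updated from this buffer partition.
            if (updated_keys.contains(key))
                continue;

            updated_keys.insert(key);
            // ... the real code updates the in-memory cell's on-disk index here ...
        }
    }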
@@ -1264,9 +1282,7 @@ private:
                 {
                     /// Try tro create next partition without reusing old partitions
                     ++current_partition_index;
 
-                    file_buffer.allocateSizeForNextPartition();
-
                     memory_buffer_partitions.emplace_back(configuration.block_size, configuration.write_buffer_blocks_size);
                 }
                 else
@@ -1276,8 +1292,6 @@ private:
                     file_buffer.reset();
                 }
             }
-
-            ++loop_count;
         }
     }
 }
@@ -389,6 +389,13 @@ public:
 
         if (dictionary_key_type == DictionaryKeyType::simple)
         {
+            if (!WhichDataType(key_col_with_type.type).isUInt64())
+                throw Exception(
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+                    "Third argument of function ({}) must be uint64 when dictionary is simple. Actual type ({}).",
+                    getName(),
+                    key_col_with_type.type->getName());
+
             if (attribute_names.size() > 1)
             {
                 const auto & result_tuple_type = assert_cast<const DataTypeTuple &>(*result_type);
@@ -448,6 +455,13 @@ public:
         }
         else if (dictionary_key_type == DictionaryKeyType::range)
         {
+            if (!WhichDataType(key_col_with_type.type).isUInt64())
+                throw Exception(
+                    ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
+                    "Third argument of function ({}) must be uint64 when dictionary is range. Actual type ({}).",
+                    getName(),
+                    key_col_with_type.type->getName());
+
             if (attribute_names.size() > 1)
             {
                 const auto & result_tuple_type = assert_cast<const DataTypeTuple &>(*result_type);
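These last two hunks are in the dictionary function implementation (presumably the dictGet family): both branches now validate the key column up front, so a non-UInt64 key fails with an explicit ILLEGAL_TYPE_OF_ARGUMENT error instead of misbehaving later, and the simple and range branches differ only in the wording of the message. A minimal sketch of the same guard pattern using only the standard library; the enum, the helper name and `std::invalid_argument` stand in for ClickHouse's WhichDataType and Exception and are not the real API.

    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    enum class DataType { UInt64, String, Float64 };

    std::string toString(DataType type)
    {
        switch (type)
        {
            case DataType::UInt64: return "UInt64";
            case DataType::String: return "String";
            case DataType::Float64: return "Float64";
        }
        return "Unknown";
    }

    /// Guard run before any attribute extraction: simple dictionaries only accept
    /// UInt64 keys, so anything else is rejected with an explicit error message.
    void checkSimpleKeyColumnType(const std::string & function_name, DataType key_type)
    {
        if (key_type == DataType::UInt64)
            return;

        std::ostringstream message;
        message << "Third argument of function (" << function_name
                << ") must be uint64 when dictionary is simple. Actual type ("
                << toString(key_type) << ").";
        throw std::invalid_argument(message.str());
    }

    int main()
    {
        checkSimpleKeyColumnType("dictGet", DataType::UInt64);     // passes silently

        try
        {
            checkSimpleKeyColumnType("dictGet", DataType::String); // throws
        }
        catch (const std::exception & e)
        {
            std::cout << e.what() << '\n';
        }
    }

With these guards in place, a mistyped key column fails fast during argument validation rather than deeper in the dictionary read path.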