Merge pull request #60005 from ClickHouse/non-significant-changes-3

Fix obviously wrong (but non significant) error in dictionaries
Alexey Milovidov 2024-02-15 05:59:07 +01:00 committed by GitHub
commit 47d8248b9d
14 changed files with 53 additions and 47 deletions


@@ -78,22 +78,22 @@ public:
double getLoadFactor() const override;
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(hit_count.load(std::memory_order_acquire)) / queries;
+return static_cast<double>(hit_count.load()) / queries;
}
bool supportUpdates() const override { return false; }
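
The clamp introduced above matters because found_count and query_count are separate atomics that are incremented independently: the reader samples query_count first and found_count second, so lookups that complete between the two loads inflate the numerator relative to the already-sampled denominator, and the raw ratio can momentarily exceed 1.0. A minimal self-contained sketch of that effect (the counter names mirror the dictionary code; everything else is illustrative):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>

    // Illustrative stand-ins for the dictionary statistics counters.
    std::atomic<std::size_t> query_count{0};
    std::atomic<std::size_t> found_count{0};

    // A successful lookup bumps both counters; each increment is atomic,
    // but there is no snapshot covering the pair.
    void successful_lookup()
    {
        query_count.fetch_add(1);
        found_count.fetch_add(1);
    }

    // Reader mirroring getFoundRate(): query_count is sampled first, so lookups
    // completing between the two loads inflate the numerator, and without the
    // std::min clamp the ratio could momentarily be reported as > 1.0.
    double found_rate()
    {
        std::size_t queries = query_count.load();
        if (!queries)
            return 0;
        return std::min(1.0, static_cast<double>(found_count.load()) / queries);
    }

    int main()
    {
        std::thread writer([]
        {
            for (int i = 0; i < 1000000; ++i)
                successful_lookup();
        });

        for (int i = 0; i < 1000000; ++i)
            (void)found_rate(); // never exceeds 1.0 thanks to the clamp

        writer.join();
        std::printf("final found_rate = %.2f\n", found_rate());
        return 0;
    }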


@@ -34,14 +34,14 @@ public:
size_t getBytesAllocated() const override { return 0; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -41,14 +41,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -57,14 +57,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -99,14 +99,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -109,6 +109,9 @@ public:
virtual size_t getQueryCount() const = 0;
+/// The percentage of time a lookup successfully found an entry.
+/// When there were no lookups, it returns zero (instead of NaN).
+/// The value is calculated non atomically and can be slightly off in the presence of concurrent lookups.
virtual double getFoundRate() const = 0;
virtual double getHitRate() const = 0;
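
The "returns zero (instead of NaN)" note exists because, without the early return, a never-queried dictionary would evaluate 0.0 / 0.0, which is NaN under IEEE 754 and would then surface in system.dictionaries.found_rate. A tiny illustrative snippet (not ClickHouse code) comparing the unguarded and guarded forms:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        std::size_t query_count = 0, found_count = 0;

        // Unguarded ratio: both operands convert to double and 0.0 / 0.0 is NaN.
        double unguarded = static_cast<double>(found_count) / query_count;

        // Guarded ratio, matching getFoundRate(): no lookups means 0, not NaN.
        double guarded = query_count ? static_cast<double>(found_count) / query_count : 0.0;

        std::printf("unguarded: %f (nan: %d), guarded: %f\n",
                    unguarded, std::isnan(unguarded) ? 1 : 0, guarded);
        return 0;
    }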


@@ -41,14 +41,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -71,14 +71,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -85,14 +85,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-size_t queries = query_count.load(std::memory_order_relaxed);
+size_t queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -58,14 +58,14 @@ public:
size_t getBytesAllocated() const override { return bytes_allocated; }
-size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }
+size_t getQueryCount() const override { return query_count.load(); }
double getFoundRate() const override
{
-const auto queries = query_count.load(std::memory_order_relaxed);
+const auto queries = query_count.load();
if (!queries)
return 0;
-return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+return std::min(1.0, static_cast<double>(found_count.load()) / queries);
}
double getHitRate() const override { return 1.0; }


@@ -85,7 +85,7 @@ VolumeJBOD::VolumeJBOD(const VolumeJBOD & volume_jbod,
DiskSelectorPtr disk_selector)
: VolumeJBOD(volume_jbod.name, config, config_prefix, disk_selector)
{
-are_merges_avoided_user_override = volume_jbod.are_merges_avoided_user_override.load(std::memory_order_relaxed);
+are_merges_avoided_user_override = volume_jbod.are_merges_avoided_user_override.load();
last_used = volume_jbod.last_used.load(std::memory_order_relaxed);
}
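
As in the other files, the VolumeJBOD hunk only drops the explicit memory-order argument: std::atomic<T>::load() defaults to std::memory_order_seq_cst, which is at least as strong as memory_order_relaxed, so the loaded value is the same and the ordering can only become more conservative. A small sketch of the equivalence (the member is illustratively modeled as std::atomic<bool>; the real type may differ):

    #include <atomic>
    #include <cassert>

    int main()
    {
        // Illustrative stand-in for the copied member.
        std::atomic<bool> are_merges_avoided_user_override{true};

        // Explicit relaxed load: atomic, but with no ordering guarantees.
        bool relaxed = are_merges_avoided_user_override.load(std::memory_order_relaxed);

        // Default load: the same as load(std::memory_order_seq_cst), i.e. the
        // strongest ordering, so dropping the argument never weakens anything.
        bool defaulted = are_merges_avoided_user_override.load();

        assert(relaxed == defaulted); // same value either way
        return 0;
    }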


@@ -4154,12 +4154,12 @@ void Context::setMaxTableSizeToDrop(size_t max_size)
size_t Context::getMaxTableSizeToDrop() const
{
-return shared->max_table_size_to_drop.load(std::memory_order_relaxed);
+return shared->max_table_size_to_drop.load();
}
void Context::checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size) const
{
-size_t max_table_size_to_drop = shared->max_table_size_to_drop.load(std::memory_order_relaxed);
+size_t max_table_size_to_drop = shared->max_table_size_to_drop.load();
checkCanBeDropped(database, table, table_size, max_table_size_to_drop);
}
@@ -4177,12 +4177,12 @@ void Context::setMaxPartitionSizeToDrop(size_t max_size)
size_t Context::getMaxPartitionSizeToDrop() const
{
-return shared->max_partition_size_to_drop.load(std::memory_order_relaxed);
+return shared->max_partition_size_to_drop.load();
}
void Context::checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size) const
{
-size_t max_partition_size_to_drop = shared->max_partition_size_to_drop.load(std::memory_order_relaxed);
+size_t max_partition_size_to_drop = shared->max_partition_size_to_drop.load();
checkCanBeDropped(database, table, partition_size, max_partition_size_to_drop);
}


@@ -22,7 +22,7 @@ CREATE DICTIONARY simple_key_flat_dictionary_01862
value String
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000);
@@ -43,7 +43,7 @@ CREATE DICTIONARY simple_key_direct_dictionary_01862
value String
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862'))
LAYOUT(DIRECT());
-- check that found_rate is 0, not nan
@@ -65,7 +65,7 @@ CREATE DICTIONARY simple_key_hashed_dictionary_01862
value String
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862'))
LAYOUT(HASHED())
LIFETIME(MIN 0 MAX 1000);
@@ -85,7 +85,7 @@ CREATE DICTIONARY simple_key_sparse_hashed_dictionary_01862
value String
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862'))
LAYOUT(SPARSE_HASHED())
LIFETIME(MIN 0 MAX 1000);
@@ -105,7 +105,7 @@ CREATE DICTIONARY simple_key_cache_dictionary_01862
value String
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862'))
LAYOUT(CACHE(SIZE_IN_CELLS 100000))
LIFETIME(MIN 0 MAX 1000);
@@ -143,7 +143,7 @@ CREATE DICTIONARY complex_key_hashed_dictionary_01862
value String
)
PRIMARY KEY id, id_key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_HASHED())
LIFETIME(MIN 0 MAX 1000);
@@ -164,7 +164,7 @@ CREATE DICTIONARY complex_key_direct_dictionary_01862
value String
)
PRIMARY KEY id, id_key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_DIRECT());
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862';
@@ -184,7 +184,7 @@ CREATE DICTIONARY complex_key_cache_dictionary_01862
value String
)
PRIMARY KEY id, id_key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 100000))
LIFETIME(MIN 0 MAX 1000);
@@ -223,7 +223,7 @@ CREATE DICTIONARY simple_key_range_hashed_dictionary_01862
last Date
)
PRIMARY KEY id
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'range_key_source_table_01862'))
LAYOUT(RANGE_HASHED())
RANGE(MIN first MAX last)
LIFETIME(MIN 0 MAX 1000);
@@ -259,13 +259,16 @@ CREATE DICTIONARY ip_trie_dictionary_01862
value String
)
PRIMARY KEY prefix
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'ip_trie_source_table_01862'))
+SOURCE(CLICKHOUSE(TABLE 'ip_trie_source_table_01862'))
LAYOUT(IP_TRIE())
LIFETIME(MIN 0 MAX 1000);
+-- found_rate = 0, because we didn't make any searches.
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
+-- found_rate = 1, because the dictionary covers the 127.0.0.1 address.
SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('127.0.0.1'))) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
+-- found_rate = 0.5, because the dictionary does not cover 1.1.1.1 and we have two lookups in total as of now.
SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('1.1.1.1'))) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
@@ -299,7 +302,7 @@ CREATE DICTIONARY polygon_dictionary_01862
name String
)
PRIMARY KEY key
-SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_01862'))
+SOURCE(CLICKHOUSE(USER 'default' TABLE 'polygons_01862'))
LIFETIME(0)
LAYOUT(POLYGON());
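
The found_rate comments added to the ip_trie block above follow directly from found_count / query_count bookkeeping: zero lookups report 0, one covered lookup gives 1/1, and a subsequent miss gives 1/2. A hedged sketch of that arithmetic (client-side illustration only, not the server implementation):

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        double query_count = 0, found_count = 0;
        auto found_rate = [&]
        {
            return query_count ? std::min(1.0, found_count / query_count) : 0.0;
        };

        // Before any dictGet: no lookups yet, so found_rate is 0 (not NaN).
        std::printf("%.2f\n", found_rate());   // 0.00

        // dictGet(..., toIPv4('127.0.0.1')): the prefix is covered, a hit.
        ++query_count; ++found_count;
        std::printf("%.2f\n", found_rate());   // 1.00

        // dictGet(..., toIPv4('1.1.1.1')): not covered, a query but no find.
        ++query_count;
        std::printf("%.2f\n", found_rate());   // 0.50

        return 0;
    }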


@@ -23,14 +23,14 @@ function test_alter_profile()
${CLICKHOUSE_CLIENT} -q $"ALTER SETTINGS PROFILE ${PROFILE} SETTINGS max_sessions_for_user = ${max_session_count}"
-# Create sesssions with $max_session_count resriction
+# Create sessions with $max_session_count restriction
for ((i = 1 ; i <= ${max_session_count} ; i++)); do
local session_id="${SESSION_ID_PREFIX}_${i}"
# Skip output from this query
${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=${USER}&session_id=${session_id}&session_check=0" --data-binary "SELECT 1" > /dev/null
done
-# Update resriction to $alter_sessions_count
+# Update restriction to $alter_sessions_count
${CLICKHOUSE_CLIENT} -q $"ALTER SETTINGS PROFILE ${PROFILE} SETTINGS max_sessions_for_user = ${alter_sessions_count}"
# Simultaneous sessions should use max settings from profile ($alter_sessions_count)