Merge branch 'master' of github.com:yandex/ClickHouse into normalize-bigint
This commit is contained in:
commit 9753ddc8a0
@@ -169,8 +169,8 @@ endif ()
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
 option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)

-if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
-    # Only for Linux, x86_64.
+if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
+    # Only for Linux, x86_64 or aarch64.
     option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
 elseif(GLIBC_COMPATIBILITY)
     message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
@@ -15,7 +15,7 @@ if (GLIBC_COMPATIBILITY)
     add_headers_and_sources(glibc_compatibility .)
     add_headers_and_sources(glibc_compatibility musl)

-    if (ARCH_ARM)
+    if (ARCH_AARCH64)
         list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
         set (musl_arch_include_dir musl/aarch64)
     elseif (ARCH_AMD64)
@@ -35,7 +35,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

-RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal
+RUN pip3 install urllib3 testflows==1.6.74 docker-compose docker dicttoxml kazoo tzlocal python-dateutil numpy

 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 17.09.1-ce
@@ -74,4 +74,3 @@ VOLUME /var/lib/docker
 EXPOSE 2375
 ENTRYPOINT ["dockerd-entrypoint.sh"]
 CMD ["sh", "-c", "python3 regression.py --no-color -o classic --local --clickhouse-binary-path ${CLICKHOUSE_TESTS_SERVER_BIN_PATH} --log test.log ${TESTFLOWS_OPTS}; cat test.log | tfs report results --format json > results.json; /usr/local/bin/process_testflows_result.py || echo -e 'failure\tCannot parse results' > check_status.tsv"]
@@ -21,6 +21,7 @@ Columns:
 - `bytes_allocated` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Amount of RAM allocated for the dictionary.
 - `query_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of queries since the dictionary was loaded or since the last successful reboot.
 - `hit_rate` ([Float64](../../sql-reference/data-types/float.md)) — For cache dictionaries, the percentage of uses for which the value was in the cache.
+- `found_rate` ([Float64](../../sql-reference/data-types/float.md)) — The percentage of uses for which the value was found.
 - `element_count` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Number of items stored in the dictionary.
 - `load_factor` ([Float64](../../sql-reference/data-types/float.md)) — Percentage filled in the dictionary (for a hashed dictionary, the percentage filled in the hash table).
 - `source` ([String](../../sql-reference/data-types/string.md)) — Text describing the [data source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md) for the dictionary.
@@ -176,8 +176,9 @@ Columns CacheDictionary<dictionary_key_type>::getColumns(
         ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);

-        query_count.fetch_add(keys.size());
-        hit_count.fetch_add(found_keys_size);
+        query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+        hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
+        found_count.fetch_add(found_keys_size, std::memory_order_relaxed);

         MutableColumns & fetched_columns_from_storage = result_of_fetch_from_storage.fetched_columns;
         const PaddedPODArray<KeyState> & key_index_to_state_from_storage = result_of_fetch_from_storage.key_index_to_state;

@@ -296,8 +297,9 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::hasKeys(const Columns & k
         ProfileEvents::increment(ProfileEvents::DictCacheKeysExpired, expired_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheKeysNotFound, not_found_keys_size);

-        query_count.fetch_add(keys.size());
-        hit_count.fetch_add(found_keys_size);
+        query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+        hit_count.fetch_add(found_keys_size, std::memory_order_relaxed);
+        found_count.fetch_add(found_keys_size, std::memory_order_relaxed);

         size_t keys_to_update_size = expired_keys_size + not_found_keys_size;
         auto update_unit = std::make_shared<CacheDictionaryUpdateUnit<dictionary_key_type>>(key_columns, result_of_fetch_from_storage.key_index_to_state, request, keys_to_update_size);

@@ -365,8 +367,10 @@ ColumnPtr CacheDictionary<dictionary_key_type>::getHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type);
+        size_t keys_found;
+        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else

@@ -381,8 +385,10 @@ ColumnUInt8::Ptr CacheDictionary<dictionary_key_type>::isInHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type);
+        size_t keys_found;
+        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else

@@ -668,6 +674,8 @@ void CacheDictionary<dictionary_key_type>::update(CacheDictionaryUpdateUnitPtr<d
         ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedMiss, requested_keys_size - found_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheKeysRequestedFound, found_keys_size);
         ProfileEvents::increment(ProfileEvents::DictCacheRequests);
+
+        found_count.fetch_add(found_keys_size, std::memory_order_relaxed);
     }
     else
     {
@@ -75,9 +75,20 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override
     {
-        return static_cast<double>(hit_count.load(std::memory_order_acquire)) / query_count.load(std::memory_order_relaxed);
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(hit_count.load(std::memory_order_acquire)) / queries;
     }

     bool supportUpdates() const override { return false; }

@@ -204,6 +215,7 @@ private:

     mutable std::atomic<size_t> hit_count{0};
     mutable std::atomic<size_t> query_count{0};
     mutable std::atomic<size_t> found_count{0};

 };
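The pattern above in miniature: a self-contained, hedged sketch (plain standard-library types, not the actual ClickHouse classes) of why the counters use std::memory_order_relaxed and why both rate getters guard the division. Relaxed ordering is enough because no other data is synchronized through these counters; only the eventual totals matter. The zero-query guard makes a freshly loaded dictionary report 0 instead of NaN (0.0 / 0), which is what the old getHitRate() could produce.

#include <atomic>
#include <cstddef>

/// Sketch of the statistics pattern used by the dictionaries in this commit.
struct DictionaryStats
{
    mutable std::atomic<size_t> query_count{0};
    mutable std::atomic<size_t> found_count{0};

    void onLookup(size_t keys, size_t keys_found) const
    {
        /// Relaxed: plain statistics, nothing else depends on the ordering of increments.
        query_count.fetch_add(keys, std::memory_order_relaxed);
        found_count.fetch_add(keys_found, std::memory_order_relaxed);
    }

    double foundRate() const
    {
        size_t queries = query_count.load(std::memory_order_relaxed);
        if (!queries)
            return 0; /// guard: 0.0 / 0 would be NaN for a fresh dictionary
        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
    }
};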
@@ -106,6 +106,8 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(

     auto result_columns = request.makeAttributesResultColumns();

+    size_t keys_found = 0;
+
     for (size_t attribute_index = 0; attribute_index < result_columns.size(); ++attribute_index)
     {
         if (!request.shouldFillResultColumnWithIndex(attribute_index))

@@ -124,7 +126,10 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(
             const auto * it = key_to_fetched_index.find(requested_key);

             if (it)
+            {
                 fetched_column_from_storage->get(it->getMapped(), value_to_insert);
+                ++keys_found;
+            }
             else
                 value_to_insert = default_value_provider.getDefaultValue(requested_key_index);

@@ -133,6 +138,7 @@ Columns DirectDictionary<dictionary_key_type>::getColumns(
     }

     query_count.fetch_add(requested_keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return request.filterRequestedColumns(result_columns);
 }

@@ -181,6 +187,8 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(

     stream->readPrefix();

+    size_t keys_found = 0;
+
     while (const auto block = stream->read())
     {
         /// Split into keys columns and attribute columns

@@ -198,6 +206,8 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(
             assert(it);

             size_t result_data_found_index = it->getMapped();
+            /// block_keys_size cannot be used, due to duplicates.
+            keys_found += !result_data[result_data_found_index];
             result_data[result_data_found_index] = true;

             block_keys_extractor.rollbackCurrentKey();

@@ -209,6 +219,7 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::hasKeys(
     stream->readSuffix();

     query_count.fetch_add(requested_keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -220,8 +231,10 @@ ColumnPtr DirectDictionary<dictionary_key_type>::getHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type);
+        size_t keys_found;
+        auto result = getKeysHierarchyDefaultImplementation(this, key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else

@@ -236,8 +249,10 @@ ColumnUInt8::Ptr DirectDictionary<dictionary_key_type>::isInHierarchy(
 {
     if (dictionary_key_type == DictionaryKeyType::simple)
     {
-        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type);
+        size_t keys_found = 0;
+        auto result = getKeysIsInHierarchyDefaultImplementation(this, key_column, in_key_column, key_type, keys_found);
         query_count.fetch_add(key_column->size(), std::memory_order_relaxed);
+        found_count.fetch_add(keys_found, std::memory_order_relaxed);
         return result;
     }
     else

@@ -42,6 +42,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return 0; }

@@ -101,6 +109,7 @@ private:
     const DictionaryLifetime dict_lifetime;

     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};
 };

 extern template class DirectDictionary<DictionaryKeyType::simple>;
@@ -130,13 +130,17 @@ ColumnUInt8::Ptr FlatDictionary::hasKeys(const Columns & key_columns, const Data
     auto result = ColumnUInt8::create(keys_size);
     auto & out = result->getData();

+    size_t keys_found = 0;
+
     for (size_t key_index = 0; key_index < keys_size; ++key_index)
     {
         const auto key = keys[key_index];
         out[key_index] = key < loaded_keys.size() && loaded_keys[key];
+        keys_found += out[key_index];
     }

     query_count.fetch_add(keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -154,16 +158,20 @@ ColumnPtr FlatDictionary::getHierarchy(ColumnPtr key_column, const DataTypePtr &

     auto is_key_valid_func = [&, this](auto & key) { return key < loaded_keys.size() && loaded_keys[key]; };

+    size_t keys_found = 0;
+
     auto get_parent_key_func = [&, this](auto & hierarchy_key)
     {
         bool is_key_valid = hierarchy_key < loaded_keys.size() && loaded_keys[hierarchy_key];
         std::optional<UInt64> result = is_key_valid ? std::make_optional(parent_keys[hierarchy_key]) : std::nullopt;
+        keys_found += result.has_value();
         return result;
     };

     auto dictionary_hierarchy_array = getKeysHierarchyArray(keys, null_value, is_key_valid_func, get_parent_key_func);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return dictionary_hierarchy_array;
 }

@@ -187,16 +195,20 @@ ColumnUInt8::Ptr FlatDictionary::isInHierarchy(

     auto is_key_valid_func = [&, this](auto & key) { return key < loaded_keys.size() && loaded_keys[key]; };

+    size_t keys_found = 0;
+
     auto get_parent_key_func = [&, this](auto & hierarchy_key)
     {
         bool is_key_valid = hierarchy_key < loaded_keys.size() && loaded_keys[hierarchy_key];
         std::optional<UInt64> result = is_key_valid ? std::make_optional(parent_keys[hierarchy_key]) : std::nullopt;
+        keys_found += result.has_value();
         return result;
     };

     auto result = getKeysIsInHierarchyColumn(keys, keys_in, null_value, is_key_valid_func, get_parent_key_func);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -223,9 +235,11 @@ ColumnPtr FlatDictionary::getDescendants(
         parent_to_child[parent_key].emplace_back(static_cast<UInt64>(i));
     }

-    auto result = getKeysDescendantsArray(keys, parent_to_child, level);
+    size_t keys_found;
+    auto result = getKeysDescendantsArray(keys, parent_to_child, level, keys_found);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -392,17 +406,23 @@ void FlatDictionary::getItemsImpl(
     const auto & container = std::get<ContainerType<AttributeType>>(attribute.container);
     const auto rows = keys.size();

+    size_t keys_found = 0;
+
     for (size_t row = 0; row < rows; ++row)
     {
         const auto key = keys[row];

         if (key < loaded_keys.size() && loaded_keys[key])
+        {
             set_value(row, static_cast<OutputType>(container[key]));
+            ++keys_found;
+        }
         else
             set_value(row, default_value_extractor[row]);
     }

     query_count.fetch_add(rows, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);
 }

 template <typename T>

@@ -47,6 +47,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return element_count; }

@@ -191,6 +199,7 @@ private:
     size_t element_count = 0;
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};

     BlockPtr update_field_loaded_block;
 };
@@ -162,6 +162,7 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co

     const auto & attribute = attributes.front();
     bool is_attribute_nullable = attribute.is_nullable_set.has_value();
+    size_t keys_found = 0;

     getAttributeContainer(0, [&](const auto & container)
     {

@@ -171,6 +172,8 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co

             out[requested_key_index] = container.find(requested_key) != container.end();

+            keys_found += out[requested_key_index];
+
             if (is_attribute_nullable && !out[requested_key_index])
                 out[requested_key_index] = attribute.is_nullable_set->find(requested_key) != nullptr;

@@ -179,6 +182,7 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::hasKeys(const Co
     });

     query_count.fetch_add(keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -201,6 +205,8 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getHierarchy(ColumnPtr

     auto is_key_valid_func = [&](auto & key) { return parent_keys_map.find(key) != parent_keys_map.end(); };

+    size_t keys_found = 0;
+
     auto get_parent_func = [&](auto & hierarchy_key)
     {
         std::optional<UInt64> result;

@@ -210,12 +216,15 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getHierarchy(ColumnPtr
         if (it != parent_keys_map.end())
             result = getValueFromCell(it);

+        keys_found += result.has_value();
+
         return result;
     };

     auto dictionary_hierarchy_array = getKeysHierarchyArray(keys, null_value, is_key_valid_func, get_parent_func);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return dictionary_hierarchy_array;
 }

@@ -247,6 +256,8 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::isInHierarchy(

     auto is_key_valid_func = [&](auto & key) { return parent_keys_map.find(key) != parent_keys_map.end(); };

+    size_t keys_found = 0;
+
     auto get_parent_func = [&](auto & hierarchy_key)
     {
         std::optional<UInt64> result;

@@ -256,12 +267,15 @@ ColumnUInt8::Ptr HashedDictionary<dictionary_key_type, sparse>::isInHierarchy(
         if (it != parent_keys_map.end())
             result = getValueFromCell(it);

+        keys_found += result.has_value();
+
         return result;
     };

     auto result = getKeysIsInHierarchyColumn(keys, keys_in, null_value, is_key_valid_func, get_parent_func);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -290,9 +304,11 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getDescendants(
     for (const auto & [key, value] : parent_keys)
         parent_to_child[value].emplace_back(key);

-    auto result = getKeysDescendantsArray(keys, parent_to_child, level);
+    size_t keys_found;
+    auto result = getKeysDescendantsArray(keys, parent_to_child, level, keys_found);

     query_count.fetch_add(keys.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -493,6 +509,8 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(

     bool is_attribute_nullable = attribute.is_nullable_set.has_value();

+    size_t keys_found = 0;
+
     for (size_t key_index = 0; key_index < keys_size; ++key_index)
     {
         auto key = keys_extractor.extractCurrentKey();

@@ -500,7 +518,10 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
         const auto it = attribute_container.find(key);

         if (it != attribute_container.end())
+        {
             set_value(key_index, getValueFromCell(it));
+            ++keys_found;
+        }
         else
         {
             if (is_attribute_nullable && attribute.is_nullable_set->find(key) != nullptr)

@@ -513,6 +534,7 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
     }

     query_count.fetch_add(keys_size, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);
 }

 template <DictionaryKeyType dictionary_key_type, bool sparse>

@@ -59,6 +59,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return element_count; }

@@ -227,6 +235,7 @@ private:
     size_t element_count = 0;
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};

     BlockPtr update_field_loaded_block;
     Arena complex_key_arena;
@@ -84,8 +84,14 @@ namespace
     }
 }

-ColumnPtr getKeysHierarchyDefaultImplementation(const IDictionary * dictionary, ColumnPtr key_column, const DataTypePtr & key_type)
+ColumnPtr getKeysHierarchyDefaultImplementation(
+    const IDictionary * dictionary,
+    ColumnPtr key_column,
+    const DataTypePtr & key_type,
+    size_t & valid_keys)
 {
+    valid_keys = 0;
+
     key_column = key_column->convertToFullColumnIfConst();
     const auto * key_column_typed = checkAndGetColumn<ColumnVector<UInt64>>(*key_column);
     if (!key_column_typed)

@@ -104,6 +110,7 @@ ColumnPtr getKeysHierarchyDefaultImplementation(const IDictionary * dictionary,
     {
         auto it = key_to_parent_key.find(key);
         std::optional<UInt64> result = (it != nullptr ? std::make_optional(it->getMapped()) : std::nullopt);
+        valid_keys += result.has_value();
         return result;
     };

@@ -117,8 +124,11 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
     const IDictionary * dictionary,
     ColumnPtr key_column,
     ColumnPtr in_key_column,
-    const DataTypePtr & key_type)
+    const DataTypePtr & key_type,
+    size_t & valid_keys)
 {
+    valid_keys = 0;
+
     key_column = key_column->convertToFullColumnIfConst();
     in_key_column = in_key_column->convertToFullColumnIfConst();

@@ -143,6 +153,7 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
     {
         auto it = key_to_parent_key.find(key);
         std::optional<UInt64> result = (it != nullptr ? std::make_optional(it->getMapped()) : std::nullopt);
+        valid_keys += result.has_value();
        return result;
     };
@@ -196,6 +196,8 @@ namespace detail
      * Hierarchy result is ElementsAndOffsets structure, for each element there is descendants array,
      * with size offset[element_index] - (element_index > 0 ? offset[element_index - 1] : 0).
      *
+     * @param valid_keys - number of keys that are valid in parent_to_child map
+     *
      * Example:
      * id  parent_id
      * 1   0
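To make the offset arithmetic in the comment above concrete, here is a minimal self-contained sketch (plain std::vector instead of PaddedPODArray, and made-up data, so purely illustrative) that decodes the flattened ElementsAndOffsets layout:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    /// Hypothetical flattened result for three requested keys:
    /// descendants of key 0 are {1, 2, 3}, key 1 has none, key 2 has {3}.
    std::vector<unsigned long> elements = {1, 2, 3, 3};
    std::vector<size_t> offsets = {3, 3, 4}; /// one cumulative offset per requested key

    for (size_t i = 0; i < offsets.size(); ++i)
    {
        /// Slice for key i: elements[offsets[i - 1] .. offsets[i] - 1], with offsets[-1] taken as 0.
        size_t begin = i > 0 ? offsets[i - 1] : 0;
        std::cout << "key " << i << ":";
        for (size_t pos = begin; pos < offsets[i]; ++pos)
            std::cout << ' ' << elements[pos];
        std::cout << '\n'; /// key 1 prints nothing: offsets[1] == offsets[0]
    }
}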
@@ -218,11 +220,13 @@ namespace detail
     ElementsAndOffsets<KeyType> getDescendants(
         const PaddedPODArray<KeyType> & keys,
         const HashMap<KeyType, PaddedPODArray<KeyType>> & parent_to_child,
-        Strategy strategy)
+        Strategy strategy,
+        size_t & valid_keys)
     {
         /// If strategy is GetAllDescendantsStrategy we try to cache and later reuse previously calculated descendants.
         /// If strategy is GetDescendantsAtSpecificLevelStrategy we do not use the cache.
         size_t keys_size = keys.size();
+        valid_keys = 0;

         PaddedPODArray<KeyType> descendants;
         descendants.reserve(keys_size);

@@ -265,6 +269,7 @@ namespace detail
                 descendants_offsets.emplace_back(descendants.size());
                 continue;
             }
+            ++valid_keys;

             next_keys_to_process_stack.emplace_back(KeyAndDepth{requested_key, 0});
@@ -425,43 +430,52 @@ ColumnUInt8::Ptr getKeysIsInHierarchyColumn(
 }

 /// Returns descendants array column for keys
+///
+/// @param valid_keys - number of keys that are valid in parent_to_child map
 template <typename KeyType>
 ColumnPtr getKeysDescendantsArray(
     const PaddedPODArray<KeyType> & requested_keys,
     const HashMap<KeyType, PaddedPODArray<KeyType>> & parent_to_child,
-    size_t level)
+    size_t level,
+    size_t & valid_keys)
 {
     if (level == 0)
     {
         detail::GetAllDescendantsStrategy strategy { .level = level };
-        auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy);
+        auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy, valid_keys);
         return detail::convertElementsAndOffsetsIntoArray(std::move(elements_and_offsets));
     }
     else
     {
         detail::GetDescendantsAtSpecificLevelStrategy strategy { .level = level };
-        auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy);
+        auto elements_and_offsets = detail::getDescendants(requested_keys, parent_to_child, strategy, valid_keys);
         return detail::convertElementsAndOffsetsIntoArray(std::move(elements_and_offsets));
     }
 }

 /** Default getHierarchy implementation for dictionaries that does not have structure with child to parent representation.
   * Implementation will build such structure with getColumn calls, and then getHierarchy for such structure.
-  * Returns ColumnArray with hierarchy arrays for keys from key_column.
+  *
+  * @param valid_keys - number of keys (from @key_column) for which information about parent exists.
+  * @return ColumnArray with hierarchy arrays for keys from key_column.
   */
 ColumnPtr getKeysHierarchyDefaultImplementation(
     const IDictionary * dictionary,
     ColumnPtr key_column,
-    const DataTypePtr & key_type);
+    const DataTypePtr & key_type,
+    size_t & valid_keys);

 /** Default isInHierarchy implementation for dictionaries that does not have structure with child to parent representation.
   * Implementation will build such structure with getColumn calls, and then getHierarchy for such structure.
-  * Returns UInt8 column if key from in_key_column is in key hierarchy from key_column.
+  *
+  * @param valid_keys - number of keys (from @key_column) for which information about parent exists.
+  * @return UInt8 column if key from in_key_column is in key hierarchy from key_column.
   */
 ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation(
     const IDictionary * dictionary,
     ColumnPtr key_column,
     ColumnPtr in_key_column,
-    const DataTypePtr & key_type);
+    const DataTypePtr & key_type,
+    size_t & valid_keys);

 }
@@ -90,6 +90,8 @@ struct IDictionary : public IExternalLoadable

     virtual size_t getQueryCount() const = 0;

+    virtual double getFoundRate() const = 0;
+
     virtual double getHitRate() const = 0;

     virtual size_t getElementCount() const = 0;
@@ -275,7 +275,9 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
     const auto rows = first_column->size();

     auto result = ColumnUInt8::create(rows);
-    auto& out = result->getData();
+    auto & out = result->getData();
+
+    size_t keys_found = 0;

     if (first_column->isNumeric())
     {

@@ -285,6 +287,7 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const
             auto addrv4 = UInt32(first_column->get64(i));
             auto found = tryLookupIPv4(addrv4, addrv6_buf);
             out[i] = (found != ipNotFound());
+            keys_found += out[i];
         }
     }
     else

@@ -297,10 +300,12 @@ ColumnUInt8::Ptr IPAddressDictionary::hasKeys(const Columns & key_columns, const

             auto found = tryLookupIPv6(reinterpret_cast<const uint8_t *>(addr.data));
             out[i] = (found != ipNotFound());
+            keys_found += out[i];
         }
     }

     query_count.fetch_add(rows, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -680,6 +685,8 @@ void IPAddressDictionary::getItemsImpl(

     auto & vec = std::get<ContainerType<AttributeType>>(attribute.maps);

+    size_t keys_found = 0;
+
     if (first_column->isNumeric())
     {
         uint8_t addrv6_buf[IPV6_BINARY_LENGTH];

@@ -689,7 +696,10 @@ void IPAddressDictionary::getItemsImpl(
             auto addrv4 = UInt32(first_column->get64(i));
             auto found = tryLookupIPv4(addrv4, addrv6_buf);
             if (found != ipNotFound())
+            {
                 set_value(i, static_cast<OutputType>(vec[*found]));
+                ++keys_found;
+            }
             else
                 set_value(i, default_value_extractor[i]);
         }

@@ -704,13 +714,17 @@ void IPAddressDictionary::getItemsImpl(

             auto found = tryLookupIPv6(reinterpret_cast<const uint8_t *>(addr.data));
             if (found != ipNotFound())
+            {
                 set_value(i, static_cast<OutputType>(vec[*found]));
+                ++keys_found;
+            }
             else
                 set_value(i, default_value_extractor[i]);
         }
     }

     query_count.fetch_add(rows, std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);
 }

 template <typename T>

@@ -38,6 +38,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return element_count; }

@@ -210,6 +218,7 @@ private:
     size_t element_count = 0;
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};

     Poco::Logger * logger;
 };
@@ -64,6 +64,8 @@ ColumnPtr IPolygonDictionary::getColumn(
     Field row_value_to_insert;
     size_t polygon_index = 0;

+    size_t keys_found = 0;
+
     if (unlikely(complex_attribute))
     {
         for (size_t requested_key_index = 0; requested_key_index < requested_key_points.size(); ++requested_key_index)

@@ -74,6 +76,7 @@ ColumnPtr IPolygonDictionary::getColumn(
             {
                 size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
                 attribute_values_column->get(attribute_values_index, row_value_to_insert);
+                ++keys_found;
             }
             else
                 row_value_to_insert = default_value_provider.getDefaultValue(requested_key_index);

@@ -110,6 +113,7 @@ ColumnPtr IPolygonDictionary::getColumn(
                 size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
                 auto data_to_insert = attribute_values_column->getDataAt(attribute_values_index);
                 result_column_typed.insertData(data_to_insert.data, data_to_insert.size);
+                ++keys_found;
             }
             else
                 result_column_typed.insert(default_value_provider.getDefaultValue(requested_key_index));

@@ -129,6 +133,7 @@ ColumnPtr IPolygonDictionary::getColumn(
                 size_t attribute_values_index = polygon_index_to_attribute_value_index[polygon_index];
                 auto & item = attribute_data[attribute_values_index];
                 result_data.emplace_back(item);
+                ++keys_found;
             }
             else
             {

@@ -143,6 +148,7 @@ ColumnPtr IPolygonDictionary::getColumn(
     }

     query_count.fetch_add(requested_key_points.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -285,16 +291,20 @@ ColumnUInt8::Ptr IPolygonDictionary::hasKeys(const Columns & key_columns, const
     std::vector<IPolygonDictionary::Point> points = extractPoints(key_columns);

     auto result = ColumnUInt8::create(points.size());
-    auto& out = result->getData();
+    auto & out = result->getData();
+
+    size_t keys_found = 0;

     for (size_t i = 0; i < points.size(); ++i)
     {
         size_t unused_find_result = 0;
         auto & point = points[i];
         out[i] = find(point, unused_find_result);
+        keys_found += out[i];
     }

     query_count.fetch_add(points.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -63,6 +63,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return attributes.empty() ? 0 : attributes.front()->size(); }

@@ -141,6 +149,7 @@ private:

     size_t bytes_allocated = 0;
     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};

     /** Since the original data may have been in the form of multi-polygons, an id is stored for each single polygon
       * corresponding to the row in which any other attributes for this entry are located.
@@ -195,17 +195,20 @@ ColumnUInt8::Ptr RangeHashedDictionary::hasKeys(const Columns & key_columns, con

     ColumnUInt8::Ptr result;

+    size_t keys_found = 0;
+
     auto type_call = [&](const auto & dictionary_attribute_type)
     {
         using Type = std::decay_t<decltype(dictionary_attribute_type)>;
         using AttributeType = typename Type::AttributeType;
         using ValueType = DictionaryValueType<AttributeType>;
-        result = hasKeysImpl<ValueType>(attribute, ids, dates);
+        result = hasKeysImpl<ValueType>(attribute, ids, dates, keys_found);
     };

     callOnDictionaryAttributeType(attribute.type, type_call);

     query_count.fetch_add(ids.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);

     return result;
 }

@@ -214,13 +217,16 @@ template <typename AttributeType>
 ColumnUInt8::Ptr RangeHashedDictionary::hasKeysImpl(
     const Attribute & attribute,
     const PaddedPODArray<UInt64> & ids,
-    const PaddedPODArray<RangeStorageType> & dates) const
+    const PaddedPODArray<RangeStorageType> & dates,
+    size_t & keys_found) const
 {
     auto result = ColumnUInt8::create(ids.size());
     auto& out = result->getData();

     const auto & attr = *std::get<Ptr<AttributeType>>(attribute.maps);

+    keys_found = 0;
+
     for (const auto row : ext::range(0, ids.size()))
     {
         const auto it = attr.find(ids[row]);

@@ -237,10 +243,8 @@ ColumnUInt8::Ptr RangeHashedDictionary::hasKeysImpl(
                 return v.range.contains(date);
             });

-            if (val_it != std::end(ranges_and_values))
-                out[row] = true;
-            else
-                out[row] = false;
+            out[row] = val_it != std::end(ranges_and_values);
+            keys_found += out[row];
         }
         else
             out[row] = false;

@@ -396,6 +400,8 @@ void RangeHashedDictionary::getItemsImpl(

     const auto & attr = *std::get<Ptr<AttributeType>>(attribute.maps);

+    size_t keys_found = 0;
+
     for (const auto row : ext::range(0, ids.size()))
     {
         const auto it = attr.find(ids[row]);

@@ -413,7 +419,8 @@ void RangeHashedDictionary::getItemsImpl(

             if (val_it != std::end(ranges_and_values))
             {
-                auto& value = val_it->value;
+                ++keys_found;
+                auto & value = val_it->value;

                 if (value)
                     set_value(row, static_cast<OutputType>(*value), false); // NOLINT

@@ -432,6 +439,7 @@ void RangeHashedDictionary::getItemsImpl(
     }

     query_count.fetch_add(ids.size(), std::memory_order_relaxed);
+    found_count.fetch_add(keys_found, std::memory_order_relaxed);
 }

@@ -32,6 +32,14 @@ public:

     size_t getQueryCount() const override { return query_count.load(std::memory_order_relaxed); }

+    double getFoundRate() const override
+    {
+        size_t queries = query_count.load(std::memory_order_relaxed);
+        if (!queries)
+            return 0;
+        return static_cast<double>(found_count.load(std::memory_order_relaxed)) / queries;
+    }
+
     double getHitRate() const override { return 1.0; }

     size_t getElementCount() const override { return element_count; }

@@ -171,7 +179,8 @@ private:
     ColumnUInt8::Ptr hasKeysImpl(
         const Attribute & attribute,
         const PaddedPODArray<UInt64> & ids,
-        const PaddedPODArray<RangeStorageType> & dates) const;
+        const PaddedPODArray<RangeStorageType> & dates,
+        size_t & keys_found) const;

     template <typename T>
     static void setAttributeValueImpl(Attribute & attribute, const UInt64 id, const Range & range, const Field & value);

@@ -209,6 +218,7 @@ private:
     size_t element_count = 0;
     size_t bucket_count = 0;
     mutable std::atomic<size_t> query_count{0};
+    mutable std::atomic<size_t> found_count{0};
 };

 }
@@ -154,10 +154,12 @@ TEST(HierarchyDictionariesUtils, getDescendants)
     PaddedPODArray<UInt64> keys = {0, 1, 2, 3, 4};

     {
+        size_t keys_found;
         auto result = DB::detail::getDescendants(
             keys,
             parent_to_child,
-            DB::detail::GetAllDescendantsStrategy());
+            DB::detail::GetAllDescendantsStrategy(),
+            keys_found);

         const auto & actual_elements = result.elements;
         const auto & actual_offsets = result.offsets;

@@ -167,12 +169,15 @@ TEST(HierarchyDictionariesUtils, getDescendants)

         ASSERT_EQ(actual_elements, expected_elements);
         ASSERT_EQ(actual_offsets, expected_offsets);
+        ASSERT_EQ(keys_found, 3);
     }
     {
+        size_t keys_found;
         auto result = DB::detail::getDescendants(
             keys,
             parent_to_child,
-            DB::detail::GetDescendantsAtSpecificLevelStrategy{1});
+            DB::detail::GetDescendantsAtSpecificLevelStrategy{1},
+            keys_found);

         const auto & actual_elements = result.elements;
         const auto & actual_offsets = result.offsets;

@@ -182,6 +187,7 @@ TEST(HierarchyDictionariesUtils, getDescendants)

         ASSERT_EQ(actual_elements, expected_elements);
         ASSERT_EQ(actual_offsets, expected_offsets);
+        ASSERT_EQ(keys_found, 3);
     }
 }
 {

@@ -192,10 +198,12 @@ TEST(HierarchyDictionariesUtils, getDescendants)
     PaddedPODArray<UInt64> keys = {1, 2, 3};

     {
+        size_t keys_found;
         auto result = DB::detail::getDescendants(
             keys,
             parent_to_child,
-            DB::detail::GetAllDescendantsStrategy());
+            DB::detail::GetAllDescendantsStrategy(),
+            keys_found);

         const auto & actual_elements = result.elements;
         const auto & actual_offsets = result.offsets;

@@ -205,12 +213,15 @@ TEST(HierarchyDictionariesUtils, getDescendants)

         ASSERT_EQ(actual_elements, expected_elements);
         ASSERT_EQ(actual_offsets, expected_offsets);
+        ASSERT_EQ(keys_found, 2);
     }
     {
+        size_t keys_found;
         auto result = DB::detail::getDescendants(
             keys,
             parent_to_child,
-            DB::detail::GetDescendantsAtSpecificLevelStrategy{1});
+            DB::detail::GetDescendantsAtSpecificLevelStrategy{1},
+            keys_found);

         const auto & actual_elements = result.elements;
         const auto & actual_offsets = result.offsets;

@@ -220,6 +231,7 @@ TEST(HierarchyDictionariesUtils, getDescendants)

         ASSERT_EQ(actual_elements, expected_elements);
         ASSERT_EQ(actual_offsets, expected_offsets);
+        ASSERT_EQ(keys_found, 2);
     }
 }
 }
@@ -13,6 +13,7 @@ namespace ErrorCodes
 {
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int ILLEGAL_COLUMN;
+    extern const int DECIMAL_OVERFLOW;
 }

 /** arrayDifference() - returns an array with the difference between all pairs of neighboring elements.

@@ -63,7 +64,23 @@ struct ArrayDifferenceImpl
             else
             {
                 Element curr = src[pos];
-                dst[pos] = curr - prev;
+
+                if constexpr (IsDecimalNumber<Element>)
+                {
+                    using ResultNativeType = typename Result::NativeType;
+
+                    ResultNativeType result_value;
+                    bool overflow = common::subOverflow(static_cast<ResultNativeType>(curr.value), static_cast<ResultNativeType>(prev), result_value);
+                    if (overflow)
+                        throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Decimal math overflow");
+
+                    dst[pos] = Result(result_value);
+                }
+                else
+                {
+                    dst[pos] = curr - prev;
+                }
+
                 prev = curr;
             }
         }
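common::subOverflow above is ClickHouse's checked-arithmetic helper. As a rough, standalone illustration of the same idea — assuming a GCC/Clang toolchain with the __builtin_sub_overflow intrinsic, which is an assumption for this sketch and not part of the commit:

#include <cstdint>
#include <iostream>
#include <stdexcept>

/// Checked subtraction: __builtin_sub_overflow computes a - b, stores the
/// (possibly wrapped) result, and returns true if the true result did not fit.
int32_t checkedSub(int32_t a, int32_t b)
{
    int32_t result;
    if (__builtin_sub_overflow(a, b, &result))
        throw std::overflow_error("Decimal math overflow");
    return result;
}

int main()
{
    std::cout << checkedSub(100, 42) << '\n'; /// 58

    try
    {
        checkedSub(100, INT32_MIN); /// 100 - (-2147483648) does not fit into Int32
    }
    catch (const std::overflow_error & e)
    {
        std::cout << e.what() << '\n';
    }
}

This is exactly the situation exercised by the new test below: subtracting -2147483647 from a Decimal32 value overflows the 32-bit native representation, so the server must report error 407 (DECIMAL_OVERFLOW) instead of returning a wrapped value.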
@@ -858,9 +858,9 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, co
     ProfileEvents::increment(ProfileEvents::ExternalAggregationUncompressedBytes, uncompressed_bytes);

     LOG_DEBUG(log,
-        "Written part in {} sec., {} rows, {} uncompressed, {} compressed,"
-        " {} uncompressed bytes per row, {} compressed bytes per row, compression rate: {}"
-        " ({} rows/sec., {}/sec. uncompressed, {}/sec. compressed)",
+        "Written part in {:.3f} sec., {} rows, {} uncompressed, {} compressed,"
+        " {:.3f} uncompressed bytes per row, {:.3f} compressed bytes per row, compression rate: {:.3f}"
+        " ({:.3f} rows/sec., {}/sec. uncompressed, {}/sec. compressed)",
         elapsed_seconds,
         rows,
         ReadableSize(uncompressed_bytes),

@@ -1493,7 +1493,7 @@ BlocksList Aggregator::convertToBlocks(AggregatedDataVariants & data_variants, b

     double elapsed_seconds = watch.elapsedSeconds();
     LOG_DEBUG(log,
-        "Converted aggregated data to blocks. {} rows, {} in {} sec. ({} rows/sec., {}/sec.)",
+        "Converted aggregated data to blocks. {} rows, {} in {} sec. ({:.3f} rows/sec., {}/sec.)",
         rows, ReadableSize(bytes),
         elapsed_seconds, rows / elapsed_seconds,
         ReadableSize(bytes / elapsed_seconds));

@@ -2097,7 +2097,7 @@ Block Aggregator::mergeBlocks(BlocksList & blocks, bool final)
     size_t rows = block.rows();
     size_t bytes = block.bytes();
     double elapsed_seconds = watch.elapsedSeconds();
-    LOG_DEBUG(log, "Merged partially aggregated blocks. {} rows, {}. in {} sec. ({} rows/sec., {}/sec.)",
+    LOG_DEBUG(log, "Merged partially aggregated blocks. {} rows, {}. in {} sec. ({:.3f} rows/sec., {}/sec.)",
         rows, ReadableSize(bytes),
         elapsed_seconds, rows / elapsed_seconds,
         ReadableSize(bytes / elapsed_seconds));

@@ -541,7 +541,7 @@ void AggregatingTransform::initGenerate()
     double elapsed_seconds = watch.elapsedSeconds();
     size_t rows = variants.sizeWithoutOverflowRow();

-    LOG_DEBUG(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({} rows/sec., {}/sec.)",
+    LOG_DEBUG(log, "Aggregated. {} to {} rows (from {}) in {} sec. ({:.3f} rows/sec., {}/sec.)",
         src_rows, rows, ReadableSize(src_bytes),
         elapsed_seconds, src_rows / elapsed_seconds,
         ReadableSize(src_bytes / elapsed_seconds));
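These log lines are fmt-style format strings, so the change is purely presentational: {:.3f} pins a floating-point argument to three decimals instead of the default shortest round-trip representation. A small sketch of the difference, calling the {fmt} library directly (an assumption for illustration; it is not how LOG_DEBUG is invoked in the code above):

#include <fmt/format.h>
#include <iostream>

int main()
{
    double elapsed_seconds = 0.123456789;

    /// Default spec prints the shortest round-trip form: "Written part in 0.123456789 sec."
    std::cout << fmt::format("Written part in {} sec.", elapsed_seconds) << '\n';
    /// Fixed precision keeps log lines compact and uniform: "Written part in 0.123 sec."
    std::cout << fmt::format("Written part in {:.3f} sec.", elapsed_seconds) << '\n';
}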
@@ -37,6 +37,7 @@ NamesAndTypesList StorageSystemDictionaries::getNamesAndTypes()
     {"bytes_allocated", std::make_shared<DataTypeUInt64>()},
     {"query_count", std::make_shared<DataTypeUInt64>()},
     {"hit_rate", std::make_shared<DataTypeFloat64>()},
+    {"found_rate", std::make_shared<DataTypeFloat64>()},
     {"element_count", std::make_shared<DataTypeUInt64>()},
     {"load_factor", std::make_shared<DataTypeFloat64>()},
     {"source", std::make_shared<DataTypeString>()},

@@ -113,6 +114,7 @@ void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPt
         res_columns[i++]->insert(dict_ptr->getBytesAllocated());
         res_columns[i++]->insert(dict_ptr->getQueryCount());
         res_columns[i++]->insert(dict_ptr->getHitRate());
+        res_columns[i++]->insert(dict_ptr->getFoundRate());
         res_columns[i++]->insert(dict_ptr->getElementCount());
         res_columns[i++]->insert(dict_ptr->getLoadFactor());
         res_columns[i++]->insert(dict_ptr->getSource()->toString());

@@ -125,7 +127,7 @@ void StorageSystemDictionaries::fillData(MutableColumns & res_columns, ContextPt
     }
     else
     {
-        for (size_t j = 0; j != 8; ++j) // Number of empty fields if dict_ptr is null
+        for (size_t j = 0; j != 9; ++j) // Number of empty fields if dict_ptr is null
             res_columns[i++]->insertDefault();
     }
@@ -0,0 +1 @@
SELECT arrayDifference([toDecimal32(100.0000991821289, 0), -2147483647]) AS x; --{serverError 407}
@@ -0,0 +1,33 @@
simple_key_flat_dictionary_01862 1
simple_key_flat_dictionary_01862 1
simple_key_flat_dictionary_01862 0.67
simple_key_direct_dictionary_01862 0
simple_key_direct_dictionary_01862 0
simple_key_direct_dictionary_01862 1
simple_key_direct_dictionary_01862 0.5
simple_key_hashed_dictionary_01862 0
simple_key_hashed_dictionary_01862 1
simple_key_hashed_dictionary_01862 0.5
simple_key_sparse_hashed_dictionary_01862 0
simple_key_sparse_hashed_dictionary_01862 1
simple_key_sparse_hashed_dictionary_01862 0.5
simple_key_cache_dictionary_01862 0
simple_key_cache_dictionary_01862 1
simple_key_cache_dictionary_01862 0.5
complex_key_hashed_dictionary_01862 0
complex_key_hashed_dictionary_01862 1
complex_key_hashed_dictionary_01862 0.5
complex_key_direct_dictionary_01862 0
complex_key_direct_dictionary_01862 1
complex_key_direct_dictionary_01862 0.5
complex_key_cache_dictionary_01862 0
complex_key_cache_dictionary_01862 1
complex_key_cache_dictionary_01862 0.5
simple_key_range_hashed_dictionary_01862 0
simple_key_range_hashed_dictionary_01862 1
simple_key_range_hashed_dictionary_01862 0.5
ip_trie_dictionary_01862 0
ip_trie_dictionary_01862 1
ip_trie_dictionary_01862 0.5
polygon_dictionary_01862 0
polygon_dictionary_01862 0.8
tests/queries/0_stateless/01852_dictionary_found_rate_long.sql (new file, 309 lines)
@@ -0,0 +1,309 @@
--
-- Simple key
--

DROP TABLE IF EXISTS simple_key_source_table_01862;
CREATE TABLE simple_key_source_table_01862
(
    id UInt64,
    value String
) ENGINE = Memory();

INSERT INTO simple_key_source_table_01862 VALUES (1, 'First');
INSERT INTO simple_key_source_table_01862 VALUES (1, 'First');

-- simple flat
DROP DICTIONARY IF EXISTS simple_key_flat_dictionary_01862;
CREATE DICTIONARY simple_key_flat_dictionary_01862
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000);

SELECT * FROM simple_key_flat_dictionary_01862 FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862';
SELECT * FROM simple_key_flat_dictionary_01862 WHERE id = 0 FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862';
SELECT dictGet('simple_key_flat_dictionary_01862', 'value', toUInt64(2)) FORMAT Null;
SELECT name, round(found_rate, 2) FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862';

DROP DICTIONARY simple_key_flat_dictionary_01862;

-- simple direct
DROP DICTIONARY IF EXISTS simple_key_direct_dictionary_01862;
CREATE DICTIONARY simple_key_direct_dictionary_01862
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
LAYOUT(DIRECT());

-- check that found_rate is 0, not nan
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862';
SELECT * FROM simple_key_direct_dictionary_01862 FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862';
SELECT dictGet('simple_key_direct_dictionary_01862', 'value', toUInt64(1)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862';
SELECT dictGet('simple_key_direct_dictionary_01862', 'value', toUInt64(2)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862';

DROP DICTIONARY simple_key_direct_dictionary_01862;

-- simple hashed
DROP DICTIONARY IF EXISTS simple_key_hashed_dictionary_01862;
CREATE DICTIONARY simple_key_hashed_dictionary_01862
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
LAYOUT(HASHED())
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862';
SELECT dictGet('simple_key_hashed_dictionary_01862', 'value', toUInt64(1)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862';
SELECT dictGet('simple_key_hashed_dictionary_01862', 'value', toUInt64(2)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862';

DROP DICTIONARY simple_key_hashed_dictionary_01862;

-- simple sparse_hashed
DROP DICTIONARY IF EXISTS simple_key_sparse_hashed_dictionary_01862;
CREATE DICTIONARY simple_key_sparse_hashed_dictionary_01862
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
LAYOUT(SPARSE_HASHED())
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862';
SELECT dictGet('simple_key_sparse_hashed_dictionary_01862', 'value', toUInt64(1)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862';
SELECT dictGet('simple_key_sparse_hashed_dictionary_01862', 'value', toUInt64(2)) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862';

DROP DICTIONARY simple_key_sparse_hashed_dictionary_01862;

-- simple cache
DROP DICTIONARY IF EXISTS simple_key_cache_dictionary_01862;
CREATE DICTIONARY simple_key_cache_dictionary_01862
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_source_table_01862'))
LAYOUT(CACHE(SIZE_IN_CELLS 100000))
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862';
SELECT toUInt64(1) as key, dictGet('simple_key_cache_dictionary_01862', 'value', key) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862';
SELECT toUInt64(2) as key, dictGet('simple_key_cache_dictionary_01862', 'value', key) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862';

DROP DICTIONARY simple_key_cache_dictionary_01862;

DROP TABLE simple_key_source_table_01862;

--
-- Complex key
--

DROP TABLE IF EXISTS complex_key_source_table_01862;
CREATE TABLE complex_key_source_table_01862
(
    id UInt64,
    id_key String,
    value String
) ENGINE = Memory();

INSERT INTO complex_key_source_table_01862 VALUES (1, 'FirstKey', 'First');
INSERT INTO complex_key_source_table_01862 VALUES (1, 'FirstKey', 'First');

-- complex hashed
DROP DICTIONARY IF EXISTS complex_key_hashed_dictionary_01862;
CREATE DICTIONARY complex_key_hashed_dictionary_01862
(
    id UInt64,
    id_key String,
    value String
)
PRIMARY KEY id, id_key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_HASHED())
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862';
SELECT dictGet('complex_key_hashed_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862';
SELECT dictGet('complex_key_hashed_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862';

DROP DICTIONARY complex_key_hashed_dictionary_01862;

-- complex direct
DROP DICTIONARY IF EXISTS complex_key_direct_dictionary_01862;
CREATE DICTIONARY complex_key_direct_dictionary_01862
(
    id UInt64,
    id_key String,
    value String
)
PRIMARY KEY id, id_key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_DIRECT());

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862';
SELECT dictGet('complex_key_direct_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862';
SELECT dictGet('complex_key_direct_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862';

DROP DICTIONARY complex_key_direct_dictionary_01862;

-- complex cache
DROP DICTIONARY IF EXISTS complex_key_cache_dictionary_01862;
CREATE DICTIONARY complex_key_cache_dictionary_01862
(
    id UInt64,
    id_key String,
    value String
)
PRIMARY KEY id, id_key
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_source_table_01862'))
LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 100000))
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862';
SELECT dictGet('complex_key_cache_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862';
SELECT dictGet('complex_key_cache_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862';

DROP DICTIONARY complex_key_cache_dictionary_01862;

DROP TABLE complex_key_source_table_01862;

--
-- Range
--
DROP TABLE IF EXISTS range_key_source_table_01862;
CREATE TABLE range_key_source_table_01862
(
    id UInt64,
    value String,
    first Date,
    last Date
) ENGINE = Memory();

INSERT INTO range_key_source_table_01862 VALUES (1, 'First', today(), today());
INSERT INTO range_key_source_table_01862 VALUES (1, 'First', today(), today());

-- simple range_hashed
DROP DICTIONARY IF EXISTS simple_key_range_hashed_dictionary_01862;
CREATE DICTIONARY simple_key_range_hashed_dictionary_01862
(
    id UInt64,
    value String,
    first Date,
    last Date
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_source_table_01862'))
LAYOUT(RANGE_HASHED())
RANGE(MIN first MAX last)
LIFETIME(MIN 0 MAX 1000);

SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862';
SELECT dictGet('simple_key_range_hashed_dictionary_01862', 'value', toUInt64(1), today()) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862';
SELECT dictGet('simple_key_range_hashed_dictionary_01862', 'value', toUInt64(2), today()) FORMAT Null;
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862';

DROP DICTIONARY simple_key_range_hashed_dictionary_01862;

DROP TABLE range_key_source_table_01862;

--
-- IP Trie
--
|
||||
DROP TABLE IF EXISTS ip_trie_source_table_01862;
|
||||
CREATE TABLE ip_trie_source_table_01862
|
||||
(
|
||||
prefix String,
|
||||
value String
|
||||
) ENGINE = Memory();
|
||||
|
||||
INSERT INTO ip_trie_source_table_01862 VALUES ('127.0.0.0/8', 'First');
|
||||
INSERT INTO ip_trie_source_table_01862 VALUES ('127.0.0.0/8', 'First');
|
||||
|
||||
-- ip_trie
|
||||
DROP DICTIONARY IF EXISTS ip_trie_dictionary_01862;
|
||||
CREATE DICTIONARY ip_trie_dictionary_01862
|
||||
(
|
||||
prefix String,
|
||||
value String
|
||||
)
|
||||
PRIMARY KEY prefix
|
||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'ip_trie_source_table_01862'))
|
||||
LAYOUT(IP_TRIE())
|
||||
LIFETIME(MIN 0 MAX 1000);
|
||||
|
||||
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
|
||||
SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('127.0.0.1'))) FORMAT Null;
|
||||
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
|
||||
SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('1.1.1.1'))) FORMAT Null;
|
||||
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862';
|
||||
|
||||
DROP DICTIONARY ip_trie_dictionary_01862;
|
||||
|
||||
DROP TABLE ip_trie_source_table_01862;
|
||||
|
||||
-- Polygon
|
||||
DROP TABLE IF EXISTS polygons_01862;
|
||||
CREATE TABLE polygons_01862 (
|
||||
key Array(Array(Array(Tuple(Float64, Float64)))),
|
||||
name String
|
||||
) ENGINE = Memory;
|
||||
INSERT INTO polygons_01862 VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Click East');
|
||||
INSERT INTO polygons_01862 VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3)]]], 'Click North');
|
||||
INSERT INTO polygons_01862 VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South');
|
||||
INSERT INTO polygons_01862 VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West');
|
||||
|
||||
DROP TABLE IF EXISTS points_01862;
|
||||
CREATE TABLE points_01862 (x Float64, y Float64) ENGINE = Memory;
|
||||
INSERT INTO points_01862 VALUES ( 0.1, 0.0);
|
||||
INSERT INTO points_01862 VALUES (-0.1, 0.0);
|
||||
INSERT INTO points_01862 VALUES ( 0.0, 1.1);
|
||||
INSERT INTO points_01862 VALUES ( 0.0, -1.1);
|
||||
INSERT INTO points_01862 VALUES ( 3.0, 3.0);
|
||||
|
||||
DROP DICTIONARY IF EXISTS polygon_dictionary_01862;
|
||||
CREATE DICTIONARY polygon_dictionary_01862
|
||||
(
|
||||
key Array(Array(Array(Tuple(Float64, Float64)))),
|
||||
name String
|
||||
)
|
||||
PRIMARY KEY key
|
||||
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons_01862'))
|
||||
LIFETIME(0)
|
||||
LAYOUT(POLYGON());
|
||||
|
||||
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'polygon_dictionary_01862';
|
||||
SELECT tuple(x, y) as key, dictGet('polygon_dictionary_01862', 'name', key) FROM points_01862 FORMAT Null;
|
||||
SELECT name, found_rate FROM system.dictionaries WHERE database = currentDatabase() AND name = 'polygon_dictionary_01862';
|
||||
|
||||
DROP TABLE polygons_01862;
|
||||
DROP TABLE points_01862;
|
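Each pair of probes above reads found_rate around one hit and one miss. As a rough sanity check of the values the test expects, a minimal sketch, assuming found_rate is reported as a 0..1 ratio of found lookups to total lookups:

# Hypothetical sketch of the expected metric, not part of the test itself:
# found_rate = lookups that found the key / total lookups.
queries = 2        # dictGet with key 1 (present) and key 2 (absent)
found = 1
assert found / queries == 0.5   # expected found_rate after both probes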
46
tests/testflows/datetime64_extended_range/common.py
Normal file
@ -0,0 +1,46 @@
from multiprocessing.dummy import Pool

from testflows.core import *

def join(tasks, test=None):
    """Join all parallel tests.
    """
    exc = None

    if test is None:
        test = current()

    for task in tasks:
        try:
            task.get()
        except Exception as e:
            exc = e

    if exc:
        raise exc

def start(pool, tasks, scenario, kwargs=None, test=None):
    """Start parallel test.
    """
    if test is None:
        test = current()
    if kwargs is None:
        kwargs = {}

    task = pool.apply_async(scenario, [], kwargs)
    tasks.append(task)
    return task

def run_scenario(pool, tasks, scenario, kwargs=None):
    """Run scenario in parallel if parallel flag is set
    in the context.
    """
    if kwargs is None:
        kwargs = {}

    if current().context.parallel:
        start(pool, tasks, scenario, kwargs)
    else:
        scenario(**kwargs)
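A minimal usage sketch for these helpers; the scenario name, pool size, and keyword argument below are illustrative assumptions, not part of this commit:

# Hypothetical usage of the helpers above; `my_scenario` and the pool
# size are illustrative only.
from multiprocessing.dummy import Pool  # thread-backed pool, as imported above

def my_scenario(param=None):
    """Stand-in for a TestFlows scenario function."""
    pass

pool = Pool(4)
tasks = []
try:
    # Runs my_scenario in the pool when current().context.parallel is set,
    # otherwise calls it inline.
    run_scenario(pool, tasks, my_scenario, kwargs={"param": 1})
finally:
    join(tasks)   # re-raises the last exception raised by any task
    pool.close()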
@ -0,0 +1,6 @@
<yandex>
    <timezone>Europe/Moscow</timezone>
    <listen_host replace="replace">0.0.0.0</listen_host>
    <path>/var/lib/clickhouse/</path>
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
</yandex>
@ -0,0 +1,17 @@
<yandex>
    <shutdown_wait_unfinished>3</shutdown_wait_unfinished>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/log.log</log>
        <errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <stderr>/var/log/clickhouse-server/stderr.log</stderr>
        <stdout>/var/log/clickhouse-server/stdout.log</stdout>
    </logger>
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>500</flush_interval_milliseconds>
    </part_log>
</yandex>
@ -0,0 +1,5 @@
<?xml version="1.0"?>
<yandex>
    <https_port>8443</https_port>
    <tcp_port_secure>9440</tcp_port_secure>
</yandex>
@ -0,0 +1,107 @@
<?xml version="1.0"?>
<yandex>
    <remote_servers>
        <replicated_cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </replicated_cluster>
        <!--
        <replicated_cluster_readonly>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                    <user>readonly</user>
                </replica>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                    <user>readonly</user>
                </replica>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                    <user>readonly</user>
                </replica>
            </shard>
        </replicated_cluster_readonly>
        -->
        <replicated_cluster_secure>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <host>clickhouse1</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
                <replica>
                    <host>clickhouse2</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
                <replica>
                    <host>clickhouse3</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </replicated_cluster_secure>
        <sharded_cluster>
            <shard>
                <replica>
                    <host>clickhouse1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse3</host>
                    <port>9000</port>
                </replica>
            </shard>
        </sharded_cluster>
        <sharded_cluster_secure>
            <shard>
                <replica>
                    <host>clickhouse1</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse2</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse3</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </sharded_cluster_secure>
    </remote_servers>
</yandex>
@ -0,0 +1,17 @@
<yandex>
    <openSSL>
        <server>
            <certificateFile>/etc/clickhouse-server/ssl/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/ssl/server.key</privateKeyFile>
            <verificationMode>none</verificationMode>
            <cacheSessions>true</cacheSessions>
        </server>
        <client>
            <cacheSessions>true</cacheSessions>
            <verificationMode>none</verificationMode>
            <invalidCertificateHandler>
                <name>AcceptCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>
</yandex>
@ -0,0 +1,20 @@
<yandex>

    <storage_configuration>
        <disks>
            <default>
                <keep_free_space_bytes>1024</keep_free_space_bytes>
            </default>
        </disks>
        <policies>
            <default>
                <volumes>
                    <default>
                        <disk>default</disk>
                    </default>
                </volumes>
            </default>
        </policies>
    </storage_configuration>

</yandex>
@ -0,0 +1,10 @@
<?xml version="1.0"?>
<yandex>
    <zookeeper>
        <node index="1">
            <host>zookeeper</host>
            <port>2181</port>
        </node>
        <session_timeout_ms>15000</session_timeout_ms>
    </zookeeper>
</yandex>
@ -0,0 +1,436 @@
<?xml version="1.0"?>
<!--
  NOTE: User and query level settings are set up in "users.xml" file.
-->
<yandex>
    <logger>
        <!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
        <!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
    </logger>
    <!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>

    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_port_secure>9440</tcp_port_secure>
    -->

    <!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, it is determined analogously to the 'hostname -f' command.
         This setting can be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <!-- <listen_host>::</listen_host> -->
    <!-- Same for hosts with disabled ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!--
    <listen_host>::1</listen_host>
    <listen_host>127.0.0.1</listen_host>
    -->
    <!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
    <!-- <listen_try>0</listen_try> -->

    <!-- Allow listen on same address:port -->
    <!-- <listen_reuse_port>0</listen_reuse_port> -->

    <!-- <listen_backlog>64</listen_backlog> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
         correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
         Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>


    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Directory with user provided files that are accessible by 'file' table function. -->
    <user_files_path>/var/lib/clickhouse/user_files/</user_files_path>

    <!-- Path to folder where users and roles created by SQL commands are stored. -->
    <access_control_path>/var/lib/clickhouse/access/</access_control_path>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- System profile of settings. These settings are used by internal processes (Buffer storage, Distributed DDL worker and so on). -->
    <!-- <system_profile>default</system_profile> -->

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         Time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text,
         it is used in date and time related functions, if specific time zone was not passed as an argument.

         Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, system time zone at server startup is used.

         Please note that the server may display a time zone alias instead of the specified name.
         Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Perform mlockall after startup to lower first queries latency
         and to prevent clickhouse executable from being paged out under high IO load.
         Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
    -->
    <mlock_executable>false</mlock_executable>

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.yandex/docs/en/table_engines/distributed/
    -->
    <remote_servers incl="remote" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_shard_localhost>
        <test_cluster_two_shards_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_cluster_two_shards_localhost>
        <test_shard_localhost_secure>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9440</port>
                    <secure>1</secure>
                </replica>
            </shard>
        </test_shard_localhost_secure>
        <test_unavailable_shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>1</port>
                </replica>
            </shard>
        </test_unavailable_shard>
    </remote_servers>


    <!-- If an element has an 'incl' attribute, its value will be replaced by the corresponding substitution from another file.
         By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed in the config in the 'include_from' element.
         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/docs/en/table_engines/replication/
    -->
    <zookeeper incl="zookeeper" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
    -->
    <macros incl="macros" optional="true" />


    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>


    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
        interval - send every X second
        root_path - prefix for keys
        hostname_in_path - append hostname to root_path (default = true)
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->


    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data into. If the table does not exist, it will be created.
             When the query log structure is changed after a system update,
             the old table will be renamed and a new table will be created automatically.
        -->
        <database>system</database>
        <table>query_log</table>
        <!--
            PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
            Example:
                event_date
                toMonday(event_date)
                toYYYYMM(event_date)
                toStartOfHour(event_time)
        -->
        <partition_by>toYYYYMM(event_date)</partition_by>
        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>

    <!-- Trace log. Stores stack traces collected by query profilers.
         See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
    <trace_log>
        <database>system</database>
        <table>trace_log</table>

        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </trace_log>

    <!-- Query thread log. Has information about all threads that participated in query execution.
         Used only for queries with setting log_query_threads = 1. -->
    <query_thread_log>
        <database>system</database>
        <table>query_thread_log</table>
        <partition_by>toYYYYMM(event_date)</partition_by>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_thread_log>

    <!-- Uncomment to use part log.
         Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
    <part_log>
        <database>system</database>
        <table>part_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->

    <!-- Uncomment to write text log into table.
         Text log contains all information from the usual server log but stores it in a structured and efficient way.
    <text_log>
        <database>system</database>
        <table>text_log</table>
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </text_log>
    -->

    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
         See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->


    <!-- Configuration of external dictionaries. See:
         https://clickhouse.yandex/docs/en/dicts/external_dicts/
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <compression incl="compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>        <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>   <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>

        <!-- Settings from this profile will be used to execute DDL queries -->
        <!-- <profile>default</profile> -->
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
         If you want to delete one table and don't want to restart clickhouse-server, you can create a special file <clickhouse-path>/flags/force_drop_table and make the DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
         The same for max_partition_size_to_drop.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
    <!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup_example>
        <pattern>
            <regexp>click_cost</regexp>
            <function>any</function>
            <retention>
                <age>0</age>
                <precision>3600</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>60</precision>
            </retention>
        </pattern>
        <default>
            <function>max</function>
            <retention>
                <age>0</age>
                <precision>60</precision>
            </retention>
            <retention>
                <age>3600</age>
                <precision>300</precision>
            </retention>
            <retention>
                <age>86400</age>
                <precision>3600</precision>
            </retention>
        </default>
    </graphite_rollup_example>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>

    <!-- Uncomment to disable ClickHouse internal DNS caching. -->
    <!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
</yandex>
@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEAua92DDli13gJ+//ZXyGaggjIuidqB0crXfhUlsrBk9BV1hH3i7fR
XGP9rUdk2ubnB3k2ejBStL5oBrkHm9SzUFSQHqfDjLZjKoUpOEmuDc4cHvX1XTR5
Pr1vf5cd0yEncJWG5W4zyUB8k++SUdL2qaeslSs+f491HBLDYn/h8zCgRbBvxhxb
9qeho1xcbnWeqkN6Kc9bgGozA16P9NLuuLttNnOblkH+lMBf42BSne/TWt3AlGZf
slKmmZcySUhF8aKfJnLKbkBCFqOtFRh8zBA9a7g+BT/lSANATCDPaAk1YVih2EKb
dpc3briTDbRsiqg2JKMI7+VdULY9bh3EawIBAg==
-----END DH PARAMETERS-----
@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV
BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow
FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI
4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T
4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU
7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj
sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg
pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC
kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j
4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr
85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C
L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD
+UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L
P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp
0g==
-----END CERTIFICATE-----
@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn
DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L
OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus
TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7
JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F
KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6
ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP
TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f
7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN
MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5
212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu
plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr
/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu
Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa
CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g
1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5
bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq
cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez
on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K
GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54
nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU
2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh
vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa
dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr
vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1
KCBtmIrQLqzMpnBpLNuSY+Q=
-----END PRIVATE KEY-----
@ -0,0 +1,133 @@
<?xml version="1.0"?>
<yandex>
    <!-- Profiles of settings. -->
    <profiles>
        <!-- Default settings. -->
        <default>
            <!-- Maximum memory usage for processing single query, in bytes. -->
            <max_memory_usage>10000000000</max_memory_usage>

            <!-- Use cache of uncompressed blocks of data. Meaningful only when processing a large number of very short queries. -->
            <use_uncompressed_cache>0</use_uncompressed_cache>

            <!-- How to choose between replicas during distributed query processing.
                 random - choose random replica from set of replicas with minimum number of errors
                 nearest_hostname - from set of replicas with minimum number of errors, choose replica
                  with minimum number of different symbols between replica's hostname and local hostname
                  (Hamming distance).
                 in_order - first live replica is chosen in specified order.
                 first_or_random - if the first replica has a higher number of errors, pick a random one from the replicas with the minimum number of errors.
            -->
            <load_balancing>random</load_balancing>
        </default>

        <!-- Profile that allows only read queries. -->
        <readonly>
            <readonly>1</readonly>
        </readonly>
    </profiles>

    <!-- Users and ACL. -->
    <users>
        <!-- If user name was not specified, 'default' user is used. -->
        <default>
            <!-- Password could be specified in plaintext or in SHA256 (in hex format).

                 If you want to specify password in plaintext (not recommended), place it in 'password' element.
                 Example: <password>qwerty</password>.
                 Password could be empty.

                 If you want to specify SHA256, place it in 'password_sha256_hex' element.
                 Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
                 Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).

                 If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
                 Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

                 How to generate decent password:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
                 The first line will be the password and the second the corresponding SHA256.

                 How to generate double SHA1:
                 Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | openssl dgst -sha1 -binary | openssl dgst -sha1
                 The first line will be the password and the second the corresponding double SHA1.
            -->
            <password></password>

            <!-- List of networks with open access.

                 To open access from everywhere, specify:
                    <ip>::/0</ip>

                 To open access only from localhost, specify:
                    <ip>::1</ip>
                    <ip>127.0.0.1</ip>

                 Each element of the list has one of the following forms:
                 <ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
                     2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
                 <host> Hostname. Example: server01.yandex.ru.
                     To check access, a DNS query is performed, and all received addresses are compared to the peer address.
                 <host_regexp> Regular expression for host names. Example: ^server\d\d-\d\d-\d\.yandex\.ru$
                     To check access, a DNS PTR query is performed for the peer address and then the regexp is applied.
                     Then, for the result of the PTR query, another DNS query is performed and all received addresses are compared to the peer address.
                     It is strongly recommended that the regexp ends with $.
                 All results of DNS requests are cached till server restart.
            -->
            <networks incl="networks" replace="replace">
                <ip>::/0</ip>
            </networks>

            <!-- Settings profile for user. -->
            <profile>default</profile>

            <!-- Quota for user. -->
            <quota>default</quota>

            <!-- Allow access management -->
            <access_management>1</access_management>

            <!-- Example of row level security policy. -->
            <!-- <databases>
                <test>
                    <filtered_table1>
                        <filter>a = 1</filter>
                    </filtered_table1>
                    <filtered_table2>
                        <filter>a + b < 1 or c - d > 5</filter>
                    </filtered_table2>
                </test>
            </databases> -->
        </default>

        <!-- Example of user with readonly access. -->
        <!-- <readonly>
            <password></password>
            <networks incl="networks" replace="replace">
                <ip>::1</ip>
                <ip>127.0.0.1</ip>
            </networks>
            <profile>readonly</profile>
            <quota>default</quota>
        </readonly> -->
    </users>

    <!-- Quotas. -->
    <quotas>
        <!-- Name of quota. -->
        <default>
            <!-- Limits for time interval. You could specify many intervals with different limits. -->
            <interval>
                <!-- Length of interval. -->
                <duration>3600</duration>

                <!-- No limits. Just calculate resource usage for time interval. -->
                <queries>0</queries>
                <errors>0</errors>
                <result_rows>0</result_rows>
                <read_rows>0</read_rows>
                <execution_time>0</execution_time>
            </interval>
        </default>
    </quotas>
</yandex>
@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse1</replica>
        <shard>01</shard>
        <shard2>01</shard2>
    </macros>
</yandex>
@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse2</replica>
        <shard>01</shard>
        <shard2>02</shard2>
    </macros>
</yandex>
@ -0,0 +1,8 @@
<?xml version="1.0"?>
<yandex>
    <macros>
        <replica>clickhouse3</replica>
        <shard>01</shard>
        <shard2>03</shard2>
    </macros>
</yandex>
@ -0,0 +1,28 @@
version: '2.3'

services:
  clickhouse:
    image: yandex/clickhouse-integration-test
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 3s
      timeout: 2s
      retries: 40
      start_period: 2s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,61 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 3s
      timeout: 2s
      retries: 5
      start_period: 2s
    security_opt:
      - label:disable
79
tests/testflows/datetime64_extended_range/regression.py
Executable file
@ -0,0 +1,79 @@
#!/usr/bin/env python3
import sys
from testflows.core import *

append_path(sys.path, "..")

from helpers.cluster import Cluster
from helpers.argparser import argparser
from datetime64_extended_range.requirements import *
from datetime64_extended_range.common import *

# cross-outs
# https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350: 128 and 256-bit types are not supported for now
# https://github.com/ClickHouse/ClickHouse/issues/17079#issuecomment-783396589 : leap seconds unsupported
# https://github.com/ClickHouse/ClickHouse/issues/22824 : dateDiff not working with dt64
# https://github.com/ClickHouse/ClickHouse/issues/22852 : formatDateTime wrong value
# https://github.com/ClickHouse/ClickHouse/issues/22854 : timeSlot(), toMonday() wrong when out of normal range
# https://github.com/ClickHouse/ClickHouse/issues/16260 : timeSlots(), dateDiff() not working with DT64
# https://github.com/ClickHouse/ClickHouse/issues/22927#issuecomment-816574952 : toRelative...Num() wrong when out of normal range
# https://github.com/ClickHouse/ClickHouse/issues/22928 : toStartOf...() wrong when out of normal range
# https://github.com/ClickHouse/ClickHouse/issues/22929 : toUnixTimestamp() exception when out of normal range
# https://github.com/ClickHouse/ClickHouse/issues/22930 : toWeek()
# https://github.com/ClickHouse/ClickHouse/issues/22948 : toYearWeek()
# https://github.com/ClickHouse/ClickHouse/issues/22959 : toUnixTimestamp64*() wrong fractional seconds treatment

# For the `reference times` test it is unclear how to evaluate correctness: the majority of test cases are correct,
# and ONLY the Juba and Monrovia timezones are damaged, probably due to wrong DST shift lookup tables.

xfails = {
    "type conversion/to int 8 16 32 64 128 256/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350")],
    "type conversion/to uint 8 16 32 64 256/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16581#issuecomment-804360350")],
    "non existent time/leap seconds/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/17079#issuecomment-783396589")],
    "date time funcs/date diff/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22824")],
    "date time funcs/format date time/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22852")],
    "date time funcs/time slot/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854")],
    "date time funcs/to monday/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22854")],
    "date time funcs/time slots/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/16260")],
    "date time funcs/to relative :/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22927#issuecomment-816574952")],
    "date time funcs/to start of :/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22928")],
    "date time funcs/to unix timestamp/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22929")],
    "date time funcs/to week/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22930")],
    "date time funcs/to year week/:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22948")],
    "type conversion/to unix timestamp64 */:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959")],
    "type conversion/from unix timestamp64 */:": [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/22959")],
    "reference times/:": [(Fail, "check procedure unclear")],
}


@TestModule
@Name("datetime64 extended range")
@ArgumentParser(argparser)
@Specifications(
    QA_SRS010_ClickHouse_DateTime64_Extended_Range
)
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange("1.0"),
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, parallel=False, stress=False):
    """ClickHouse DateTime64 Extended Range regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    with Cluster(local, clickhouse_binary_path, nodes=nodes) as cluster:
        self.context.cluster = cluster
        self.context.parallel = parallel
        self.context.stress = stress

        Scenario(run=load("datetime64_extended_range.tests.generic", "generic"), flags=TE)
        Scenario(run=load("datetime64_extended_range.tests.non_existent_time", "feature"), flags=TE)
        Scenario(run=load("datetime64_extended_range.tests.reference_times", "reference_times"), flags=TE)
        Scenario(run=load("datetime64_extended_range.tests.date_time_functions", "date_time_funcs"), flags=TE)
        Scenario(run=load("datetime64_extended_range.tests.type_conversion", "type_conversion"), flags=TE)

if main():
    regression()
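For orientation, each `load(...)` call above imports a scenario by name from a test module; a minimal hypothetical sketch of such a module follows (the body and node name are illustrative, not the committed tests):

# Hypothetical sketch of a module that
# load("datetime64_extended_range.tests.generic", "generic") could import.
from testflows.core import *

@TestScenario
@Name("generic")
def generic(self):
    """Illustrative placeholder: query one node of the cluster
    set up by the regression module."""
    node = self.context.cluster.node("clickhouse1")
    node.query("SELECT toDateTime64('2100-01-01 00:00:00', 0, 'UTC')")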
@ -0,0 +1 @@
from .requirements import *
@ -0,0 +1,805 @@
# QA-SRS010 ClickHouse DateTime64 Extended Range
# Software Requirements Specification

## Table of Contents

* 1 [Revision History](#revision-history)
* 2 [Introduction](#introduction)
* 3 [Terminology](#terminology)
  * 3.1 [SRS](#srs)
  * 3.2 [Normal Date Range](#normal-date-range)
  * 3.3 [Extended Date Range](#extended-date-range)
* 4 [Requirements](#requirements)
  * 4.1 [Generic](#generic)
      * 4.1.0.1 [RQ.SRS-010.DateTime64.ExtendedRange](#rqsrs-010datetime64extendedrange)
      * 4.1.0.2 [RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start](#rqsrs-010datetime64extendedrangenormalrangestart)
      * 4.1.0.3 [RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone](#rqsrs-010datetime64extendedrangenormalrangestartbeforeepochfortimezone)
      * 4.1.0.4 [RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End](#rqsrs-010datetime64extendedrangenormalrangeend)
      * 4.1.0.5 [RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone](#rqsrs-010datetime64extendedrangenormalrangeendafterepochfortimezone)
      * 4.1.0.6 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions](#rqsrs-010datetime64extendedrangetypeconversionfunctions)
      * 4.1.0.7 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions](#rqsrs-010datetime64extendedrangedatesandtimesfunctions)
      * 4.1.0.8 [RQ.SRS-010.DateTime64.ExtendedRange.TimeZones](#rqsrs-010datetime64extendedrangetimezones)
      * 4.1.0.9 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime](#rqsrs-010datetime64extendedrangenonexistenttime)
      * 4.1.0.10 [RQ.SRS-010.DateTime64.ExtendedRange.Comparison](#rqsrs-010datetime64extendedrangecomparison)
      * 4.1.0.11 [RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps](#rqsrs-010datetime64extendedrangespecifictimestamps)
  * 4.2 [Specific](#specific)
      * 4.2.0.1 [RQ.SRS-010.DateTime64.ExtendedRange.Start](#rqsrs-010datetime64extendedrangestart)
      * 4.2.0.2 [RQ.SRS-010.DateTime64.ExtendedRange.End](#rqsrs-010datetime64extendedrangeend)
      * 4.2.0.3 [Non-Existent Time](#non-existent-time)
        * 4.2.0.3.1 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate](#rqsrs-010datetime64extendedrangenonexistenttimeinvaliddate)
        * 4.2.0.3.2 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime](#rqsrs-010datetime64extendedrangenonexistenttimeinvalidtime)
        * 4.2.0.3.3 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch](#rqsrs-010datetime64extendedrangenonexistenttimetimezoneswitch)
        * 4.2.0.3.4 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime](#rqsrs-010datetime64extendedrangenonexistenttimedaylightsavingtime)
        * 4.2.0.3.5 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared](#rqsrs-010datetime64extendedrangenonexistenttimedaylightsavingtimedisappeared)
        * 4.2.0.3.6 [RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds](#rqsrs-010datetime64extendedrangenonexistenttimeleapseconds)
      * 4.2.0.4 [Dates And Times Functions](#dates-and-times-functions)
        * 4.2.0.4.1 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstotimezone)
        * 4.2.0.4.2 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoyear)
        * 4.2.0.4.3 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoquarter)
        * 4.2.0.4.4 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstomonth)
        * 4.2.0.4.5 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstodayofyear)
        * 4.2.0.4.6 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstodayofmonth)
        * 4.2.0.4.7 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstodayofweek)
        * 4.2.0.4.8 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstohour)
        * 4.2.0.4.9 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstominute)
        * 4.2.0.4.10 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstosecond)
        * 4.2.0.4.11 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstounixtimestamp)
        * 4.2.0.4.12 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofyear)
        * 4.2.0.4.13 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofisoyear)
        * 4.2.0.4.14 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofquarter)
        * 4.2.0.4.15 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofmonth)
        * 4.2.0.4.16 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstomonday)
        * 4.2.0.4.17 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofweek)
        * 4.2.0.4.18 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofday)
        * 4.2.0.4.19 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofhour)
        * 4.2.0.4.20 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofminute)
        * 4.2.0.4.21 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofsecond)
        * 4.2.0.4.22 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffiveminute)
        * 4.2.0.4.23 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoftenminutes)
        * 4.2.0.4.24 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffifteenminutes)
        * 4.2.0.4.25 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofinterval)
        * 4.2.0.4.26 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstotime)
        * 4.2.0.4.27 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativeyearnum)
        * 4.2.0.4.28 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativequarternum)
        * 4.2.0.4.29 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativemonthnum)
        * 4.2.0.4.30 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativeweeknum)
        * 4.2.0.4.31 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativedaynum)
        * 4.2.0.4.32 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativehournum)
        * 4.2.0.4.33 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativeminutenum)
        * 4.2.0.4.34 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstorelativesecondnum)
        * 4.2.0.4.35 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoisoyear)
        * 4.2.0.4.36 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoisoweek)
        * 4.2.0.4.37 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoweek)
        * 4.2.0.4.38 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoyearweek)
        * 4.2.0.4.39 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsnow)
        * 4.2.0.4.40 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoday)
        * 4.2.0.4.41 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsyesterday)
        * 4.2.0.4.42 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstimeslot)
|
||||
* 4.2.0.4.43 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoyyyymm)
|
||||
* 4.2.0.4.44 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoyyyymmdd)
|
||||
* 4.2.0.4.45 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstoyyyymmddhhmmss)
|
||||
* 4.2.0.4.46 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddyears)
|
||||
* 4.2.0.4.47 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddmonths)
|
||||
* 4.2.0.4.48 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddweeks)
|
||||
* 4.2.0.4.49 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsadddays)
|
||||
* 4.2.0.4.50 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddhours)
|
||||
* 4.2.0.4.51 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddminutes)
|
||||
* 4.2.0.4.52 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddseconds)
|
||||
* 4.2.0.4.53 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsaddquarters)
|
||||
* 4.2.0.4.54 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractyears)
|
||||
* 4.2.0.4.55 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractmonths)
|
||||
* 4.2.0.4.56 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractweeks)
|
||||
* 4.2.0.4.57 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractdays)
|
||||
* 4.2.0.4.58 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtracthours)
|
||||
* 4.2.0.4.59 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractminutes)
|
||||
* 4.2.0.4.60 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractseconds)
|
||||
* 4.2.0.4.61 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters](#rqsrs-010datetime64extendedrangedatesandtimesfunctionssubtractquarters)
|
||||
* 4.2.0.4.62 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsdatediff)
|
||||
* 4.2.0.4.63 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstimeslots)
|
||||
* 4.2.0.4.64 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime](#rqsrs-010datetime64extendedrangedatesandtimesfunctionsformatdatetime)
|
||||
* 4.2.1 [Type Conversion Functions](#type-conversion-functions)
|
||||
* 4.2.1.4.1 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)](#rqsrs-010datetime64extendedrangetypeconversionfunctionstoint8163264128256)
|
||||
* 4.2.1.4.2 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)](#rqsrs-010datetime64extendedrangetypeconversionfunctionstouint8163264256)
|
||||
* 4.2.1.4.3 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)](#rqsrs-010datetime64extendedrangetypeconversionfunctionstofloat3264)
|
||||
* 4.2.1.4.4 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate](#rqsrs-010datetime64extendedrangetypeconversionfunctionstodate)
|
||||
* 4.2.1.4.5 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime](#rqsrs-010datetime64extendedrangetypeconversionfunctionstodatetime)
|
||||
* 4.2.1.4.6 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64](#rqsrs-010datetime64extendedrangetypeconversionfunctionstodatetime64)
|
||||
* 4.2.1.4.7 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime](#rqsrs-010datetime64extendedrangetypeconversionfunctionstodatetime64fromstringmissingtime)
|
||||
* 4.2.1.4.8 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)](#rqsrs-010datetime64extendedrangetypeconversionfunctionstodecimal3264128256)
|
||||
* 4.2.1.4.9 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString](#rqsrs-010datetime64extendedrangetypeconversionfunctionstostring)
|
||||
* 4.2.1.4.10 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)](#rqsrs-010datetime64extendedrangetypeconversionfunctionscastxt)
|
||||
* 4.2.1.4.11 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli](#rqsrs-010datetime64extendedrangetypeconversionfunctionstounixtimestamp64milli)
|
||||
* 4.2.1.4.12 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro](#rqsrs-010datetime64extendedrangetypeconversionfunctionstounixtimestamp64micro)
|
||||
* 4.2.1.4.13 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano](#rqsrs-010datetime64extendedrangetypeconversionfunctionstounixtimestamp64nano)
|
||||
* 4.2.1.4.14 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli](#rqsrs-010datetime64extendedrangetypeconversionfunctionsfromunixtimestamp64milli)
|
||||
* 4.2.1.4.15 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro](#rqsrs-010datetime64extendedrangetypeconversionfunctionsfromunixtimestamp64micro)
|
||||
* 4.2.1.4.16 [RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano](#rqsrs-010datetime64extendedrangetypeconversionfunctionsfromunixtimestamp64nano)
|
||||
* 5 [References](#references)
|
||||
|
||||
## Revision History

This document is stored in an electronic form using [Git] source control management software
hosted in a [GitHub Repository].
All the updates are tracked using the [Revision History].

## Introduction

This document covers requirements to support an extended range for the [DateTime64] data type
that is outside the normal **1970** (1970-01-01 00:00:00 UTC) to **2105** (2105-12-31 23:59:59.999999 UTC) date range.

## Terminology

### SRS

Software Requirements Specification

### Normal Date Range

**1970** `1970-01-01T00:00:00.000000` to **2105** `2105-12-31T23:59:59.999999`

### Extended Date Range

**1925** `1925-01-01T00:00:00.000000` to **2238** `2238-12-31T23:59:59.999999`

## Requirements

### Generic

##### RQ.SRS-010.DateTime64.ExtendedRange
version: 1.0

[ClickHouse] SHALL support extended range for the [DateTime64] data type that includes dates from the year **1925** to **2238**.

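As a minimal illustrative sketch of this requirement (the timezone is an arbitrary pick), a pre-epoch value from the extended range can be constructed directly:

```
SELECT toDateTime64('1925-01-01 00:00:00.000', 3, 'UTC');
-- expected under this SRS: 1925-01-01 00:00:00.000
```
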
##### RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start
version: 1.0

[ClickHouse] SHALL support proper time handling around the normal date range that starts at `1970-01-01 00:00:00.000`
expressed using the [ISO 8601 format].

##### RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.Start.BeforeEpochForTimeZone
version: 1.0

[ClickHouse] SHALL support proper time handling around the start of the [normal date range]
when this time for the time zone is before the start of the [normal date range].

##### RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End
version: 1.0

[ClickHouse] SHALL support proper time handling around the normal date range that ends at `2105-12-31T23:59:59.999999`
expressed using the [ISO 8601 format].

##### RQ.SRS-010.DateTime64.ExtendedRange.NormalRange.End.AfterEpochForTimeZone
version: 1.0

[ClickHouse] SHALL support proper time handling around the end of the [normal date range]
when this time for the time zone is after the end of the [normal date range].

##### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions
version: 1.0

[ClickHouse] SHALL support proper conversion between the [DateTime64] data type and other data types.

##### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions
version: 1.0

[ClickHouse] SHALL support correct operation of the [Dates and Times Functions] with the [DateTime64] data type
when it stores dates within the [normal date range] and the [extended date range].

##### RQ.SRS-010.DateTime64.ExtendedRange.TimeZones
version: 1.0

[ClickHouse] SHALL support correct operation with the [DateTime64] extended range data type
when combined with a supported time zone.

##### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime
version: 1.0

[ClickHouse] SHALL support proper handling of non-existent times when using the [DateTime64] extended range data type.

##### RQ.SRS-010.DateTime64.ExtendedRange.Comparison
version: 1.0

[ClickHouse] SHALL support proper handling of time comparison when using the [DateTime64] extended range data type.
For example, `SELECT toDateTime64('2019-05-05 20:20:12.050', 3) < now()`.

##### RQ.SRS-010.DateTime64.ExtendedRange.SpecificTimestamps
version: 1.0

[ClickHouse] SHALL properly work with the following timestamps in all supported timezones:

```
[9961200,73476000,325666800,354675600,370400400,386125200,388566010,401850000,417574811,496803600,528253200,624423614,636516015,671011200,717555600,752047218,859683600,922582800,1018173600,1035705600,1143334800,1162105223,1174784400,1194156000,1206838823,1224982823,1236495624,1319936400,1319936424,1425798025,1459040400,1509872400,2090451627,2140668000]
```

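One hedged way to spot-check an entry from this list is to convert it in a chosen timezone (the timezone below is an arbitrary pick from the supported set):

```
SELECT toDateTime64(1162105223, 0, 'Asia/Novosibirsk');
```
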
### Specific

##### RQ.SRS-010.DateTime64.ExtendedRange.Start
version: 1.0

[ClickHouse] SHALL support extended range for the [DateTime64] data type that starts at `1925-01-01T00:00:00.000000`
expressed using the [ISO 8601 format].

##### RQ.SRS-010.DateTime64.ExtendedRange.End
version: 1.0

[ClickHouse] SHALL support extended range for the [DateTime64] data type that ends at `2238-12-31T23:59:59.999999`
expressed using the [ISO 8601 format].

##### Non-Existent Time

###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidDate
version: 1.0

[ClickHouse] SHALL support proper handling of invalid dates when using the [DateTime64] extended range data type,
such as:

* `YYYY-04-31, YYYY-06-31, YYYY-09-31, YYYY-11-31`
* `1990-02-30 00:00:02`

###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.InvalidTime
version: 1.0

[ClickHouse] SHALL support proper handling of invalid time for a timezone
when using the [DateTime64] extended range data type, for example,

* `2002-04-07 02:30:00` never happened at all in the US/Eastern timezone ([Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime))

###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.TimeZoneSwitch
version: 1.0

[ClickHouse] SHALL support proper handling of invalid time when using the [DateTime64] extended range data type
when the invalid time is caused by *countries switching timezone definitions with no
daylight savings time switch* [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime).

>
> For example, in 1915 Warsaw switched from Warsaw time to Central European time with
> no daylight savings transition. So at the stroke of midnight on August 5th 1915 the clocks
> were wound back 24 minutes creating an ambiguous time period that cannot be specified without
> referring to the timezone abbreviation or the actual UTC offset. In this case midnight happened twice,
> neither time during a daylight saving time period. pytz handles this transition by treating the ambiguous
> period before the switch as daylight savings time, and the ambiguous period after as standard time.
>
> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)

###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime
version: 1.0

[ClickHouse] SHALL support proper handling of invalid time when using the [DateTime64] extended range data type
when, for a given timezone, time switches from standard to daylight saving.

> For example, in the US/Eastern timezone on the last Sunday morning in October, the following sequence happens:
>
> 01:00 EDT occurs
> 1 hour later, instead of 2:00am the clock is turned back 1 hour and 01:00 happens again (this time 01:00 EST)
> In fact, every instant between 01:00 and 02:00 occurs twice.
> [Stuart Bishop: pytz library](http://pytz.sourceforge.net/#problems-with-localtime)

###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.DaylightSavingTime.Disappeared
version: 1.0

[ClickHouse] SHALL support proper handling of invalid time when using the [DateTime64] extended range data type
for a given timezone when the transition from standard to daylight saving time causes an hour to disappear.

Expected behavior: if a [DateTime64] value is initialized with a skipped time value, it is treated as DST and the resulting value will be an hour earlier, e.g. `SELECT toDateTime64('2020-03-08 02:34:00', 0, 'America/Denver')` returns `2020-03-08 01:34:00`.

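The query and expected output below restate the example from this requirement verbatim:

```
-- 2020-03-08 02:34:00 falls into the skipped hour in America/Denver (spring forward),
-- so the value is treated as DST and shifted an hour earlier
SELECT toDateTime64('2020-03-08 02:34:00', 0, 'America/Denver');
-- expected: 2020-03-08 01:34:00
```
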
###### RQ.SRS-010.DateTime64.ExtendedRange.NonExistentTime.LeapSeconds
version: 1.0

[ClickHouse] SHALL support proper handling of leap second adjustments when using the [DateTime64] extended range data type.

##### Dates And Times Functions

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTimeZone
version: 1.0

[ClickHouse] SHALL support correct operation of the [toTimeZone](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#totimezone)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

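A sketch of the kind of check this requirement implies (the value and timezone pair are illustrative assumptions):

```
-- convert an extended-range value from UTC into another supported timezone
SELECT toTimeZone(toDateTime64('1925-06-15 12:00:00', 0, 'UTC'), 'America/Denver');
```
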
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYear
version: 1.0

[ClickHouse] SHALL support correct operation of the [toYear](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toyear)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toQuarter
version: 1.0

[ClickHouse] SHALL support correct operation of the [toQuarter](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toquarter)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonth
version: 1.0

[ClickHouse] SHALL support correct operation of the [toMonth](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tomonth)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfYear
version: 1.0

[ClickHouse] SHALL support correct operation of the [toDayOfYear](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#todayofyear)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfMonth
version: 1.0

[ClickHouse] SHALL support correct operation of the [toDayOfMonth](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#todayofmonth)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toDayOfWeek
version: 1.0

[ClickHouse] SHALL support correct operation of the [toDayOfWeek](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#todayofweek)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toHour
version: 1.0

[ClickHouse] SHALL support correct operation of the [toHour](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tohour)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMinute
version: 1.0

[ClickHouse] SHALL support correct operation of the [toMinute](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tominute)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toSecond
version: 1.0

[ClickHouse] SHALL support correct operation of the [toSecond](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tosecond)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toUnixTimestamp
version: 1.0

[ClickHouse] SHALL support correct operation of the [toUnixTimestamp](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#to-unix-timestamp)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
The timestamp value is expected to be negative when the [DateTime64] value is prior to `1970-01-01` and positive otherwise.

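For instance, a value one second before the epoch illustrates the sign expectation (the output shown is what the requirement implies, not an observed result):

```
SELECT toUnixTimestamp(toDateTime64('1969-12-31 23:59:59', 0, 'UTC'));
-- expected per this requirement: -1
```
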
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfYear
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfYear](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofyear)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfISOYear
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfISOYear](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofisoyear)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfQuarter
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfQuarter](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofquarter)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMonth
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfMonth](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofmonth)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toMonday
version: 1.0

[ClickHouse] SHALL support correct operation of the [toMonday](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tomonday)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfWeek
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfWeek](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofweektmode)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfDay
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfDay](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofday)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfHour](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofhour)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfMinute](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofminute)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfSecond](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofsecond)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfTenMinutes](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartoftenminutes)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfFifteenMinutes](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartoffifteenminutes)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval
version: 1.0

[ClickHouse] SHALL support correct operation of the [toStartOfInterval](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#tostartofintervaltime-or-data-interval-x-unit-time-zone)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
A more detailed description can be found [here](https://github.com/ClickHouse/ClickHouse/issues/1201).

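As an illustrative sketch (interval and value chosen arbitrarily):

```
-- round an extended-range value down to a 15-minute boundary
SELECT toStartOfInterval(toDateTime64('2230-04-05 12:38:17', 0, 'UTC'), INTERVAL 15 MINUTE);
-- expected: 2230-04-05 12:30:00
```
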
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toTime
version: 1.0

[ClickHouse] SHALL support correct operation of the [toTime](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#totime)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeYearNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeYearNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativeyearnum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeQuarterNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeQuarterNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativequarternum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMonthNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeMonthNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativemonthnum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeWeekNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeWeekNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativeweeknum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeDayNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeDayNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativedaynum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeHourNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeHourNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativehournum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeMinuteNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeMinuteNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativeminutenum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toRelativeSecondNum
version: 1.0

[ClickHouse] SHALL support correct operation of the [toRelativeSecondNum](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#torelativesecondnum)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOYear
version: 1.0

[ClickHouse] SHALL support correct operation of the [toISOYear](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toisoyear)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toISOWeek
version: 1.0

[ClickHouse] SHALL support correct operation of the [toISOWeek](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toisoweek)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toWeek
version: 1.0

[ClickHouse] SHALL support correct operation of the [toWeek](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toweekdatemode)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYearWeek
version: 1.0

[ClickHouse] SHALL support correct operation of the [toYearWeek](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toyearweekdatemode)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

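A sketch combining the ISO-based functions on an extended-range date (the value is an illustrative assumption):

```
SELECT toISOYear(toDateTime64('1925-01-01 00:00:00', 0, 'UTC')),
       toISOWeek(toDateTime64('1925-01-01 00:00:00', 0, 'UTC'));
```
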
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.now
version: 1.0

[ClickHouse] SHALL support conversion of output from the [now](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#now)
function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.today
version: 1.0

[ClickHouse] SHALL support conversion of output from the [today](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#today)
function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.yesterday
version: 1.0

[ClickHouse] SHALL support conversion of output from the [yesterday](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#yesterday)
function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlot
version: 1.0

[ClickHouse] SHALL support conversion of output from the [timeSlot](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#timeslot)
function to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

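A minimal sketch of feeding the output of one of these functions into [DateTime64] (precision and timezone are arbitrary picks):

```
-- cast the current time into DateTime64 with millisecond precision
SELECT toDateTime64(now(), 3, 'UTC');
```
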
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMM
version: 1.0

[ClickHouse] SHALL support correct operation of the [toYYYYMM](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toyyyymm)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDD
version: 1.0

[ClickHouse] SHALL support correct operation of the [toYYYYMMDD](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toyyyymmdd)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toYYYYMMDDhhmmss
version: 1.0

[ClickHouse] SHALL support correct operation of the [toYYYYMMDDhhmmss](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#toyyyymmddhhmmss)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

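The expected encoding is plain positional arithmetic (YYYY * 100 + MM); for the extended-range end date:

```
SELECT toYYYYMM(toDateTime64('2238-12-31 23:59:59', 0, 'UTC'));
-- expected: 223812
```
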
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addYears
version: 1.0

[ClickHouse] SHALL support correct operation of the [addYears](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMonths
version: 1.0

[ClickHouse] SHALL support correct operation of the [addMonths](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addWeeks
version: 1.0

[ClickHouse] SHALL support correct operation of the [addWeeks](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addDays
version: 1.0

[ClickHouse] SHALL support correct operation of the [addDays](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addHours
version: 1.0

[ClickHouse] SHALL support correct operation of the [addHours](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addMinutes
version: 1.0

[ClickHouse] SHALL support correct operation of the [addMinutes](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addSeconds
version: 1.0

[ClickHouse] SHALL support correct operation of the [addSeconds](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.addQuarters
version: 1.0

[ClickHouse] SHALL support correct operation of the [addQuarters](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

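A sketch of the add* family crossing the epoch from the extended range (values chosen for illustration):

```
SELECT addYears(toDateTime64('1925-01-01 00:00:00', 0, 'UTC'), 100);
-- expected: 2025-01-01 00:00:00
```
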
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractYears
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractYears](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMonths
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractMonths](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractWeeks
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractWeeks](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractDays
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractDays](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractHours
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractHours](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractMinutes
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractMinutes](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractSeconds
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractSeconds](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.subtractQuarters
version: 1.0

[ClickHouse] SHALL support correct operation of the [subtractQuarters](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#subtractyears-subtractmonths-subtractweeks-subtractdays-subtracthours-subtractminutes-subtractseconds-subtractquarters)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.dateDiff
version: 1.0

[ClickHouse] SHALL support correct operation of the [dateDiff](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#datediff)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.timeSlots
version: 1.0

[ClickHouse] SHALL support correct operation of the [timeSlots](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#timeslotsstarttime-duration-size)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.formatDateTime
version: 1.0

[ClickHouse] SHALL support correct operation of the [formatDateTime](https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/#formatdatetime)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

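A hedged sketch exercising dateDiff and formatDateTime across the full extended range (the expected outputs follow from the definitions, not from a recorded run):

```
SELECT dateDiff('year',
                toDateTime64('1925-01-01 00:00:00', 0, 'UTC'),
                toDateTime64('2238-01-01 00:00:00', 0, 'UTC'));
-- expected: 313

SELECT formatDateTime(toDateTime64('1925-01-01 00:00:00', 0, 'UTC'), '%Y-%m-%d');
-- expected: 1925-01-01
```
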
#### Type Conversion Functions

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toInt(8|16|32|64|128|256)
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to integer types using [toInt(8|16|32|64|128|256)](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#toint8163264128256) functions.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUInt(8|16|32|64|256)
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to unsigned integer types using [toUInt(8|16|32|64|256)](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#touint8163264256) functions.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toFloat(32|64)
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to float types using [toFloat(32|64)](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#tofloat3264) functions.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDate
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range]
to the [Date](https://clickhouse.tech/docs/en/sql-reference/data-types/date/) type using the [toDate](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#todate) function.
This function is ONLY supposed to work in the NORMAL RANGE.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range]
to the [DateTime](https://clickhouse.tech/docs/en/sql-reference/data-types/datetime/) type using the [toDateTime](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#todatetime) function.
This function is ONLY supposed to work in the NORMAL RANGE.

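An illustrative sketch of the numeric conversions (the exact numeric semantics for extended-range values are what the tests verify, so no output is asserted here):

```
SELECT toInt64(toDateTime64('1925-01-01 00:00:00', 0, 'UTC')),
       toFloat64(toDateTime64('1925-01-01 00:00:00', 0, 'UTC'));
```
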
###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64
version: 1.0

[ClickHouse] SHALL support correct conversion from the data types supported by the [toDateTime64](https://clickhouse.tech/docs/en/sql-reference/data-types/datetime64/) function
to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDateTime64.FromString.MissingTime
version: 1.0

[ClickHouse] SHALL support correct conversion from the [String](https://clickhouse.tech/docs/en/sql-reference/data-types/string/)
data type to the [DateTime64](https://clickhouse.tech/docs/en/sql-reference/data-types/datetime64/) data type
when the value of the string is missing the `hh:mm:ss.sss` part.
For example, `toDateTime64('2020-01-01', 3)`.

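Restating the example from this requirement, with the missing time part expected to be filled with zeros:

```
SELECT toDateTime64('2020-01-01', 3);
-- expected: 2020-01-01 00:00:00.000
```
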
###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toDecimal(32|64|128|256)
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to [Decimal](https://clickhouse.tech/docs/en/sql-reference/data-types/decimal/) types using [toDecimal(32|64|128|256)](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#todecimal3264128256) functions.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toString
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to the [String](https://clickhouse.tech/docs/en/sql-reference/data-types/string/) type using the [toString](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#tostring) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.CAST(x,T)
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to one of the supported data types using the [CAST(x,T)](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#type_conversion_function-cast) function.

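A sketch of both conversions (the target type is an illustrative pick from the supported set):

```
SELECT toString(toDateTime64('1925-01-01 00:00:00.000', 3, 'UTC'));
SELECT CAST(toDateTime64('1925-01-01 00:00:00.000', 3, 'UTC'), 'String');
```
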
###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Milli
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Milli](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64milli) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Micro
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Micro](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64micro) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.toUnixTimestamp64Nano
version: 1.0

[ClickHouse] SHALL support correct conversion of the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
to the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type using the [toUnixTimestamp64Nano](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#tounixtimestamp64nano) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Milli
version: 1.0

[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type
to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
using the [fromUnixTimestamp64Milli](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64milli) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Micro
version: 1.0

[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type
to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
using the [fromUnixTimestamp64Micro](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64micro) function.

###### RQ.SRS-010.DateTime64.ExtendedRange.TypeConversionFunctions.fromUnixTimestamp64Nano
version: 1.0

[ClickHouse] SHALL support correct conversion from the [Int64](https://clickhouse.tech/docs/en/sql-reference/data-types/int-uint/) type
to the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range]
using the [fromUnixTimestamp64Nano](https://clickhouse.tech/docs/en/sql-reference/functions/type-conversion-functions/#fromunixtimestamp64nano) function.

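A sketch of the expected round trip through Int64 at millisecond precision (the value is an arbitrary pick from the extended range):

```
SELECT fromUnixTimestamp64Milli(
           toUnixTimestamp64Milli(toDateTime64('1925-01-01 00:00:00.123', 3, 'UTC')), 'UTC');
-- expected: 1925-01-01 00:00:00.123
```
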
## References

* **DateTime64**: https://clickhouse.tech/docs/en/sql-reference/data-types/datetime64/
* **ISO 8601 format**: https://en.wikipedia.org/wiki/ISO_8601
* **ClickHouse**: https://clickhouse.tech
* **GitHub Repository**: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/datetime64_extended_range/requirements/requirements.md
* **Revision History**: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/datetime64_extended_range/requirements/requirements.md
* **Git**: https://git-scm.com/

[SRS]: #srs
[normal date range]: #normal-date-range
[extended date range]: #extended-date-range
[Dates and Times Functions]: https://clickhouse.tech/docs/en/sql-reference/functions/date-time-functions/
[DateTime64]: https://clickhouse.tech/docs/en/sql-reference/data-types/datetime64/
[ISO 8601 format]: https://en.wikipedia.org/wiki/ISO_8601
[ClickHouse]: https://clickhouse.tech
[GitHub Repository]: https://github.com/ClickHouse/ClickHouse/blob/master/tests/testflows/datetime64_extended_range/requirements/requirements.md
[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/datetime64_extended_range/requirements/requirements.md
[Git]: https://git-scm.com/
[GitHub]: https://github.com
File diff suppressed because it is too large

179 tests/testflows/datetime64_extended_range/tests/common.py Normal file
@ -0,0 +1,179 @@
import pytz
import datetime

from testflows.core import *
from testflows.asserts import error
from contextlib import contextmanager
from datetime64_extended_range.common import *


def in_normal_range(dt: datetime.datetime):
    """Check if a DateTime is in the normal date range
    [1970-01-01 00:00:00, 2105-12-31 23:59:59.999999].
    """
    return datetime.datetime(1970, 1, 1, 0, 0, 0) <= dt <= datetime.datetime(2105, 12, 31, 23, 59, 59, 999999)


def years_range(stress=False, padding=(0, 0)):
    """Return the set of year values used for testing."""
    return range(1925 + padding[0], 2283 - padding[1]) if stress else (1927, 2000, 2281)


def timezones_range(stress=False):
    """Return the set of timezone values used for testing."""
    if stress:
        return pytz.all_timezones
    else:
        return ['UTC', 'Asia/Novosibirsk', 'America/Denver']


@contextmanager
def create_table(timezone, node):
    try:
        node.query(f"CREATE TABLE dt(timestamp DateTime64(3, {timezone})) Engine = TinyLog")
        yield
    finally:
        node.query("DROP TABLE dt")


@TestOutline
def insert_check_datetime(self, datetime, expected, precision=0, timezone="UTC"):
    """Check that a particular datetime value works with functions
    that accept the DateTime64 data type when a table is involved.

    :param datetime: datetime string
    :param expected: expected result
    :param precision: time precision, default: 0
    :param timezone: timezone, default: UTC
    """
    with create_table(timezone, self.context.node):
        with When("I use toDateTime64"):
            r = self.context.node.query(f"SELECT toDateTime64('{datetime}', {precision}, '{timezone}')")

        with Then(f"I expect {expected}"):
            assert r.output == expected, error()


def datetime_generator(year, microseconds=False):
    """Yield datetimes across the given year in uneven steps
    (1 day, 1 hour, 1 minute, 1 second) until December 31.
    """
    date = datetime.datetime(year, 1, 1, 0, 0, 0)
    if microseconds:
        date = datetime.datetime(year, 1, 1, 0, 0, 0, 123000)
    while not (date.month == 12 and date.day == 31):
        yield date
        date = date + datetime.timedelta(days=1, hours=1, minutes=1, seconds=1)


def select_dates_in_year(year, stress=False, microseconds=False):
    """Return various datetimes in a year that are to be checked:
    the first and the last second of the year and, for leap years,
    February 29.
    """
    if not stress:
        dates = [datetime.datetime(year, 1, 1, 0, 0, 0), datetime.datetime(year, 12, 31, 23, 59, 59)]
        if microseconds:
            dates = [datetime.datetime(year, 1, 1, 0, 0, 0, 123000), datetime.datetime(year, 12, 31, 23, 59, 59, 123000)]
        if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
            dates.append(datetime.datetime(year, 2, 29, 11, 11, 11, 123000))
        return dates
    else:
        # Pass microseconds through so stress mode honors the flag as well.
        return datetime_generator(year, microseconds=microseconds)


@TestOutline
def select_check_datetime(self, datetime, expected, precision=0, timezone="UTC"):
    """Check that a particular datetime value works with functions
    that accept the DateTime64 data type in a plain SELECT.

    :param datetime: datetime string
    :param expected: expected result
    :param precision: time precision, default: 0
    :param timezone: timezone, default: UTC
    """
    with When("I use toDateTime64"):
        r = self.context.node.query(f"SELECT toDateTime64('{datetime}', {precision}, '{timezone}')")

    with Then(f"I expect {expected}"):
        assert r.output == expected, error()


@TestStep(When)
def exec_query(self, request, expected=None, exitcode=None):
    """Execute a query and check the expected result.

    :param request: query string
    :param expected: result string
    :param exitcode: exitcode
    """
    r = self.context.node.query(request)

    if expected is not None:
        with Then("output should match the expected", description=f"{expected}"):
            assert r.output == expected, error()

    elif exitcode is not None:
        with Then("output exitcode should match expected", description=f"{exitcode}"):
            assert r.exitcode == exitcode, error()


@TestStep
def walk_datetime_in_incrementing_steps(self, date, hrs_range=(0, 24), step=1, timezone="UTC", precision=0):
    """Sweep time starting from some start date. The time is incremented
    in steps specified by the `step` parameter (default: 1 min).

    :param hrs_range: range in hours
    :param step: step in minutes
    """
    stress = self.context.stress

    tasks = []
    pool = Pool(4)
    # Seconds field for the given precision, e.g. "00" for 0 and "00.000" for 3.
    secs = f"00{'.' * (precision > 0)}{'0' * precision}"

    try:
        with When(f"I loop through datetime range {hrs_range} starting from {date} in {step}min increments"):
            for hrs in range(*hrs_range) if stress else (hrs_range[0], hrs_range[1] - 1):
                for mins in range(0, 60, step) if stress else (0, 59):
                    datetime = f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}"
                    expected = datetime

                    with When(f"time is {datetime}"):
                        run_scenario(pool, tasks, Test(name=f"{hrs}:{mins}:{secs}", test=select_check_datetime),
                                     kwargs=dict(datetime=datetime, precision=precision, timezone=timezone,
                                                 expected=expected))
    finally:
        join(tasks)


@TestStep
def walk_datetime_in_decrementing_steps(self, date, hrs_range=(23, 0), step=1, timezone="UTC", precision=0):
    """Sweep time starting from some start date. The time is decremented
    in steps specified by the `step` parameter (default: 1 min).

    :param date: string
    :param hrs_range: range in hours
    :param step: step in minutes
    :param timezone: string
    """
    stress = self.context.stress

    tasks = []
    pool = Pool(4)
    # Seconds field for the given precision, e.g. "00" for 0 and "00.000" for 3.
    secs = f"00{'.' * (precision > 0)}{'0' * precision}"

    try:
        with When(f"I loop through datetime range {hrs_range} starting from {date} in {step}min decrements"):
            for hrs in range(*hrs_range, -1) if stress else (hrs_range[1], hrs_range[0]):
                for mins in range(59, 0, -step) if stress else (59, 0):
                    datetime = f"{date} {str(hrs).zfill(2)}:{str(mins).zfill(2)}:{secs}"
                    expected = datetime

                    with When(f"time is {datetime}"):
                        run_scenario(pool, tasks, Test(name=f"{hrs}:{mins}:{secs}", test=select_check_datetime),
                                     kwargs=dict(datetime=datetime, precision=precision, timezone=timezone,
                                                 expected=expected))
    finally:
        join(tasks)
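The `secs` expression in the two walk helpers above is compact but easy to misread; a standalone illustration of what it produces for a few precision values (illustration only, not part of the test suite):

    # What the seconds-field expression yields for different precisions.
    for precision in (0, 3, 5):
        secs = f"00{'.' * (precision > 0)}{'0' * precision}"
        print(precision, repr(secs))
    # 0 '00'
    # 3 '00.000'
    # 5 '00.00000'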
File diff suppressed because it is too large

tests/testflows/datetime64_extended_range/tests/generic.py (151 lines, new file)
@ -0,0 +1,151 @@
from testflows.core import *

from datetime64_extended_range.requirements.requirements import *
from datetime64_extended_range.common import *
from datetime64_extended_range.tests.common import *

import pytz
import datetime
import itertools


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start("1.0"),
)
def normal_range_start(self):
    """Check DateTime64 can accept dates around the start of the normal range that begins at 1970-01-01 00:00:00.000."""
    with When("I do incrementing time sweep", description="check different time points in the first 24 hours at given date"):
        walk_datetime_in_incrementing_steps(date="1970-01-01", precision=3, hrs_range=(0, 24))


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End("1.0")
)
def normal_range_end(self):
    """Check DateTime64 can accept dates around the end of the normal range that ends at 2105-12-31 23:59:59.999999."""
    with When("I do decrementing time sweep",
              description="check different time points in the last 24 hours at given date"):
        walk_datetime_in_decrementing_steps(date="2105-12-31", precision=3, hrs_range=(23, 0))


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_Start("1.0")
)
def extended_range_start(self):
    """Check DateTime64 supports dates around the start of the extended range that begins at 1925-01-01 00:00:00.000000."""
    with When("I do incrementing time sweep",
              description="check different time points in the first 24 hours at given date"):
        walk_datetime_in_incrementing_steps(date="1925-01-01", precision=5, hrs_range=(0, 24))


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_End("1.0")
)
def extended_range_end(self):
    """Check DateTime64 supports dates around the end of the extended range that ends at 2283-11-11 23:59:59.999999."""
    with When("I do decrementing time sweep",
              description="check different time points in the last 24 hours at given date"):
        walk_datetime_in_decrementing_steps(date="2238-12-31", precision=5, hrs_range=(23, 0))


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_Start_BeforeEpochForTimeZone("1.0")
)
def timezone_local_below_normal_range(self):
    """Check how a UTC time value inside the normal range is treated
    when the local timezone time value is below the normal range.
    """
    with When("I do incrementing time sweep",
              description="check different time points when UTC datetime fits normal range but is below it for local datetime"):
        walk_datetime_in_incrementing_steps(date="1969-12-31", hrs_range=(17, 24), timezone='America/Phoenix')


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NormalRange_End_AfterEpochForTimeZone("1.0")
)
def timezone_local_above_normal_range(self):
    """Check how a UTC time value inside the normal range is treated
    when the local timezone time value is above the normal range.
    """
    with When("I do decrementing time sweep",
              description="check different time points when UTC datetime fits normal range but is above it for local datetime"):
        walk_datetime_in_decrementing_steps(date="2106-01-01", hrs_range=(6, 0), timezone='Asia/Novosibirsk')


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_Comparison("1.0")
)
def comparison_check(self):
    """Check how comparison works with the DateTime64 extended range."""
    stress = self.context.stress
    comparators = (">", "<", "==", "<=", ">=", "!=")
    timezones = timezones_range(stress=stress)
    datetimes = []

    for year in years_range(stress=stress):
        datetimes += list(select_dates_in_year(year=year, stress=stress))

    for dt_1, dt_2 in itertools.product(datetimes, datetimes):
        for tz1, tz2 in itertools.product(timezones, timezones):
            dt1_str = dt_1.strftime("%Y-%m-%d %H:%M:%S")
            dt2_str = dt_2.strftime("%Y-%m-%d %H:%M:%S")
            dt1 = pytz.timezone(tz1).localize(dt_1)
            dt2 = pytz.timezone(tz2).localize(dt_2)

            with When(f"{dt1_str} {tz1}, {dt2_str} {tz2}"):
                for c in comparators:
                    # Compute the reference result by comparing the localized
                    # python datetimes, then compare against ClickHouse.
                    expr = f"dt1 {c} dt2"
                    expected = str(int(eval(expr)))
                    with Then(f"I check {dt1_str} {c} {dt2_str}"):
                        query = f"SELECT toDateTime64('{dt1_str}', 0, '{tz1}') {c} toDateTime64('{dt2_str}', 0, '{tz2}')"
                        exec_query(request=query, expected=expected)
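For instance (an illustrative pair, not taken from the suite itself): '2000-01-01 00:00:00' in America/Denver is 07:00:00 UTC, so comparing it with the same wall-clock string in UTC using `>` should yield 1:

    # Illustration: Denver is UTC-7 in January, so this instant is later than
    # 2000-01-01 00:00:00 UTC and the comparison is expected to return 1.
    query = ("SELECT toDateTime64('2000-01-01 00:00:00', 0, 'America/Denver')"
             " > toDateTime64('2000-01-01 00:00:00', 0, 'UTC')")
    # expected output: "1"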

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TimeZones("1.0")
)
def timezones_support(self):
    """Check how timezones work with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress=stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with Step(f"{dt} {tz}"):
                    with By("computing expected output using python"):
                        dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                    with And("forming a toDateTime64 ClickHouse query"):
                        query = f"SELECT toDateTime64('{dt_str}', 0, '{tz}')"
                    with Then("I execute query", flags=TE):
                        exec_query(request=query, expected=f"{dt_str}")


@TestFeature
def generic(self, node="clickhouse1"):
    """Check the basic operations with DateTime64."""
    self.context.node = self.context.cluster.node(node)

    for scenario in loads(current_module(), Scenario, Suite):
        Scenario(run=scenario, flags=TE)
@ -0,0 +1,164 @@
from testflows.core import *
import datetime
import pytz

from datetime64_extended_range.requirements.requirements import *
from datetime64_extended_range.tests.common import *


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidDate("1.0")
)
def invalid_date(self):
    """Check how a non-existent date is treated.
    For example, check the 31st day in a month that only has 30 days.
    """
    date_range = [1930, 1980, 2230]

    if self.context.stress:
        date_range = range(1925, 2238)

    with When("I check the 31st day of a 30-day month"):
        for year in date_range:
            for month in (4, 6, 9, 11):
                # The invalid date is expected to roll over to the 1st of the next month.
                datetime = f"{year}-{str(month).zfill(2)}-31 12:23:34"
                expected = f"{year}-{str(month + 1).zfill(2)}-01 12:23:34"

                with Then(f"{datetime}", description=f"expected {expected}", flags=TE):
                    select_check_datetime(datetime=datetime, expected=expected)
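Concretely, one pair generated by the loop above (shown here for illustration):

    # 1930-04-31 does not exist; toDateTime64 is expected to roll it over.
    query = "SELECT toDateTime64('1930-04-31 12:23:34', 0, 'UTC')"
    # expected output: "1930-05-01 12:23:34"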

@TestOutline(Suite)
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_InvalidTime("1.0")
)
@Examples(
    "datetime expected timezone", [
        ('2002-04-07 02:30:00', '2002-04-07 01:30:00', 'America/New_York'),
        ('2020-03-29 02:30:00', '2020-03-29 01:30:00', 'Europe/Zurich'),
        ('2017-03-26 02:30:00', '2017-03-26 01:30:00', 'Europe/Berlin')
    ])
def invalid_time(self, datetime, expected, timezone='UTC'):
    """Check proper handling of invalid time for a timezone
    when using the DateTime64 extended range data type. For example,
    2:30am on 7th April 2002 never happened at all in the US/Eastern timezone.
    """
    with When(f"I check non-existent datetime {datetime}"):
        select_check_datetime(datetime=datetime, expected=expected, timezone=timezone)


@TestOutline(Scenario)
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime("1.0"),
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime_Disappeared("1.0")
)
@Examples(
    "tz time_dates", [
        ('America/Denver', {'02:30:00': ('2018-03-11', '2020-03-08', '1980-04-27', '1942-02-09')}),
        ('Europe/Zurich', {'02:30:00': ('2016-03-27', '2020-03-29', '1981-03-29'), '01:30:00': ('1942-05-04', )})
    ])
def dst_disappeared(self, tz, time_dates):
    """Check proper handling of the DST switch when an hour is skipped.
    Tested in two steps: first, try to make a DateTime64 with the skipped time value;
    second, add an interval so that the result falls into the skipped time.
    """
    for time, dates in time_dates.items():
        for date in dates:
            with Given("forming a datetime"):
                dt_str = f"{date} {time}"
                dt = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
            with Step("Assignment test"):
                with When("computing expected result"):
                    dt -= datetime.timedelta(hours=1)
                    expected = dt.strftime("%Y-%m-%d %H:%M:%S")
                with Then("I check the skipped hour"):
                    select_check_datetime(datetime=dt_str, expected=expected, timezone=tz)
            with Step("Addition test"):
                with When("computing expected result"):
                    dt += datetime.timedelta(hours=2)
                    expected = dt.strftime("%Y-%m-%d %H:%M:%S")
                with Then("I check the skipped hour"):
                    query = f"SELECT addHours(toDateTime64('{dt_str}', 0, '{tz}'), 1)"
                    exec_query(request=query, expected=f"{expected}")


@TestOutline(Scenario)
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_LeapSeconds("1.0")
)
@Examples(
    "datet years", [
        ("06-30 23:59:55", [1972, 1981, 1982, 1983, 1985, 1992, 1993, 1994, 1997, 2012, 2015]),
        ("12-31 23:59:55", [1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1987, 1989, 1990, 1995, 1998, 2005, 2008, 2016])
    ])
def leap_seconds(self, datet, years):
    """Test proper handling of leap seconds. Read more: https://de.wikipedia.org/wiki/Schaltsekunde
    Checked by selecting a timestamp prior to the leap second and adding
    enough seconds that the result is after it.
    """
    for year in years:
        with When(f"{datet}, {year}"):
            with By("forming an expected result using python"):
                dt_str = f"{year}-{datet}"
                dt = datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')
                # The SQL side adds 10 seconds while the naive expectation
                # advances only 9: the leap second at this boundary is
                # expected to absorb one.
                dt += datetime.timedelta(seconds=9)
                expected = dt.strftime("%Y-%m-%d %H:%M:%S")
            with And("forming a query"):
                query = f"SELECT addSeconds(toDateTime64('{dt_str}', 0, 'UTC'), 10)"
            with Then("executing query"):
                exec_query(request=query, expected=f"{expected}")
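A standalone sketch of the expectation encoded above (illustration only): naive datetime arithmetic advances 9 wall-clock seconds where the SQL side adds 10, because the 1972-06-30 leap second absorbs one:

    import datetime

    dt = datetime.datetime(1972, 6, 30, 23, 59, 55)
    print(dt + datetime.timedelta(seconds=9))   # 1972-07-01 00:00:04
    # The matching query is expected to produce the same wall-clock string:
    # SELECT addSeconds(toDateTime64('1972-06-30 23:59:55', 0, 'UTC'), 10)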

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_DaylightSavingTime("1.0"),
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime_TimeZoneSwitch("1.0")
)
def dst_time_zone_switch(self):
    """Check how ClickHouse handles invalid time when using the DateTime64 extended range data type
    and the invalid time is caused by countries switching timezone definitions with no daylight saving time switch.
    """
    stress = self.context.stress
    timezones = timezones_range(stress)
    utc = pytz.timezone("UTC")

    for timezone in timezones:
        if timezone == 'UTC':
            continue
        with Step(f"{timezone}"):
            tz = pytz.timezone(timezone)
            # Using pytz internals: the UTC moments at which the zone's offset
            # changed and the corresponding (offset, dst, name) tuples.
            transition_times = tz._utc_transition_times
            transition_info = tz._transition_info

            for i in range(len(transition_times) - 1, 0, -1):
                if (transition_times[i] > datetime.datetime.now()) or (transition_times[i].year < 1925) or (transition_times[i].year > 2238):
                    continue
                with Step(f"{transition_times[i]}"):
                    with By("localize python datetime"):
                        dt = transition_times[i]
                        dt0 = dt - datetime.timedelta(hours=4)
                        dt0 = utc.localize(dt0).astimezone(tz).replace(tzinfo=None)
                    with And("compute expected result using pytz"):
                        seconds_shift = transition_info[i][0] - transition_info[i - 1][0]
                        dt1 = dt0 + datetime.timedelta(hours=8) + seconds_shift
                        dt0_str = dt0.strftime("%Y-%m-%d %H:%M:%S")
                        dt1_str = dt1.strftime("%Y-%m-%d %H:%M:%S")
                    with And("forming a ClickHouse query"):
                        query = f"SELECT addHours(toDateTime64('{dt0_str}', 0, '{timezone}'), 8)"
                    with Then("executing the query"):
                        exec_query(request=query, expected=f"{dt1_str}")


@TestFeature
@Name("non existent time")
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_NonExistentTime("1.0")
)
def feature(self, node="clickhouse1"):
    """Check how ClickHouse treats non-existent time in the DateTime64 data type."""
    self.context.node = self.context.cluster.node(node)

    for scenario in loads(current_module(), Scenario, Suite):
        Scenario(run=scenario, flags=TE)
@ -0,0 +1,38 @@
import pytz
import datetime

from testflows.core import *
from datetime64_extended_range.common import *
from datetime64_extended_range.requirements.requirements import *
from datetime64_extended_range.tests.common import *


@TestSuite
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_SpecificTimestamps("1.0")
)
def reference_times(self, node="clickhouse1"):
    """Check how ClickHouse converts a set of particular timestamps
    to DateTime64 for all timezones and compare the result to pytz.
    """
    self.context.node = self.context.cluster.node(node)

    timestamps = [9961200, 73476000, 325666800, 354675600, 370400400, 386125200, 388566010, 401850000, 417574811,
                  496803600, 528253200, 624423614, 636516015, 671011200, 717555600, 752047218, 859683600, 922582800,
                  1018173600, 1035705600, 1143334800, 1162105223, 1174784400, 1194156000, 1206838823, 1224982823,
                  1236495624, 1319936400, 1319936424, 1425798025, 1459040400, 1509872400, 2090451627, 2140668000]

    query = ""

    for tz in pytz.all_timezones:
        timezone = pytz.timezone(tz)
        # For every timezone, build a list of (timestamp, pytz result,
        # ClickHouse result) tuples and keep only the rows where the two
        # disagree, so a fully matching timezone produces no output.
        query += f"select '{tz}', arrayJoin(arrayFilter(x -> x.2 <> x.3, arrayMap(x -> tuple(x.1, x.2, toString(toDateTime64(x.1, 0, '{tz}'))), ["
        need_comma = 0
        for timestamp in timestamps:
            for reference_timestamp in [timestamp - 1, timestamp, timestamp + 1]:
                query += f"{',' if need_comma else ''}tuple({reference_timestamp},'{datetime.datetime.fromtimestamp(reference_timestamp, timezone).strftime('%Y-%m-%d %H:%M:%S')}')"
                need_comma = 1
        query += "] ) ) );"

    exec_query(request=query, expected="")
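A single cell of that comparison, extracted for illustration (runnable on its own; the printed string is what the big query embeds as the pytz reference):

    import datetime
    import pytz

    tz = pytz.timezone("Europe/Moscow")
    ref = datetime.datetime.fromtimestamp(325666800, tz).strftime("%Y-%m-%d %H:%M:%S")
    print(ref)  # the pytz reference for timestamp 325666800 in Europe/Moscow
    # ClickHouse is expected to produce the same string for:
    # SELECT toString(toDateTime64(325666800, 0, 'Europe/Moscow'))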
@ -0,0 +1,479 @@
import time
import pytz
import decimal
import itertools
import numpy as np
from dateutil.tz import tzlocal
from datetime import datetime, timedelta
import dateutil.relativedelta as rd
from testflows.core import *

from datetime64_extended_range.requirements.requirements import *
from datetime64_extended_range.common import *
from datetime64_extended_range.tests.common import *


@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toInt_8_16_32_64_128_256_("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_int_8_16_32_64_128_256(self, cast):
    """Check the toInt(8|16|32|64|128|256) functions with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                for int_type in (8, 16, 32, 64, 128, 256):
                    with When(f"{dt} {tz}, int{int_type}"):
                        with By("converting datetime to string"):
                            dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                        with And("computing the expected result using python"):
                            py_res = int(time.mktime(dt.timetuple()))
                            expected = f"{py_res}"
                            if not (int_type == 128 or int_type == 256):
                                # All dates fit into Int128 and Int256, so only the
                                # narrower types need the numpy round-trip check.
                                np_res = eval(f"np.int{int_type}({py_res})")
                            else:
                                np_res = py_res
                            if np_res == py_res:
                                with Given(f"{py_res} fits int{int_type}"):
                                    with When(f"making a query string for ClickHouse if py_res fits int{int_type}"):
                                        if cast:
                                            query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'Int{int_type}')"
                                        else:
                                            query = f"SELECT toInt{int_type}(toDateTime64('{dt_str}', 0, '{tz}'))"
                                    with Then(f"I execute toInt{int_type}() query"):
                                        exec_query(request=query, expected=f"{expected}")
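The numpy round trip above doubles as a range check: the cast wraps when the value does not fit, so equality with `py_res` holds only in range. An equivalent explicit check, shown for illustration:

    def fits_int(value, bits):
        """True if `value` is representable as a signed `bits`-bit integer."""
        return -2**(bits - 1) <= value < 2**(bits - 1)

    assert fits_int(946684800, 64) and not fits_int(946684800, 16)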

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUInt_8_16_32_64_256_("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_uint_8_16_32_64_256(self, cast):
    """Check the toUInt(8|16|32|64|256) functions with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                for int_type in (8, 16, 32, 64, 256):
                    with Step(f"{dt} {tz}, uint{int_type}"):
                        with By("converting datetime to string"):
                            dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                        with And("computing the expected result using python"):
                            py_res = int(time.mktime(dt.timetuple()))
                            expected = f"{py_res}"
                            if int_type != 256:
                                # All dates fit into UInt256, so only the narrower
                                # types need the numpy round-trip check.
                                np_res = eval(f"np.uint{int_type}({py_res})")
                            else:
                                np_res = py_res
                            if np_res == py_res:
                                with Given(f"{py_res} fits uint{int_type}"):
                                    with When(f"making a query string for ClickHouse if py_res fits uint{int_type}"):
                                        if cast:
                                            query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'UInt{int_type}')"
                                        else:
                                            query = f"SELECT toUInt{int_type}(toDateTime64('{dt_str}', 0, '{tz}'))"
                                    with Then(f"I execute toUInt{int_type}() query"):
                                        exec_query(request=query, expected=f"{expected}")

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toFloat_32_64_("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_float_32_64(self, cast):
    """Check the toFloat(32|64) functions with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                for float_type in (32, 64):
                    with Step(f"{dt} {tz}, float{float_type}"):
                        with By("converting datetime to string"):
                            dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                        with And("computing the expected result using python"):
                            py_res = int(time.mktime(dt.timetuple()))
                            expected = f"{py_res}"
                            # Skip values that cannot be represented exactly in the
                            # target float type (Float32 loses integer precision).
                            np_res = eval(f"np.float{float_type}({py_res})")
                        if np_res == py_res:
                            with When("making a query string for ClickHouse"):
                                if cast:
                                    query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'Float{float_type}')"
                                else:
                                    query = f"SELECT toFloat{float_type}(toDateTime64('{dt_str}', 0, '{tz}'))"
                            with Then(f"I execute toFloat{float_type}() query"):
                                exec_query(request=query, expected=f"{expected}")

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64_FromString_MissingTime("1.0")
)
def to_datetime64_from_string_missing_time(self):
    """Check toDateTime64() conversion with the DateTime64 extended range
    when the string is missing the time part.
    """
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with Step(f"{dt} {tz}"):
                    with By("converting datetime to string"):
                        dt_str = dt.strftime("%Y-%m-%d")
                    with And("figuring out the expected result in python"):
                        # The missing time part is expected to default to midnight.
                        expected = f"{dt.strftime('%Y-%m-%d')} 00:00:00"
                    with When("making a query string for ClickHouse"):
                        query = f"SELECT toDateTime64('{dt_str}', 0, '{tz}')"
                    with Then("I execute toDateTime64() query"):
                        exec_query(request=query, expected=f"{expected}")

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime64("1.0")
)
def to_datetime64(self):
    """Check the toDateTime64() conversion with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with By("converting datetime to string"):
                    dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                with And("making a query string for ClickHouse"):
                    query = f"SELECT toDateTime64('{dt_str}', 0, '{tz}')"
                with When("figuring out the expected result in python"):
                    expected = f"{dt.strftime('%Y-%m-%d %H:%M:%S')}"
                with Then("I execute toDateTime64() query"):
                    exec_query(request=query, expected=f"{expected}")

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDate("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_date(self, cast):
    """Check the toDate() conversion with DateTime64. This is supposed to work in the normal range ONLY."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with Step(f"{dt} {tz}"):
                    expected = None  # by default the result is not checked, only the exitcode
                    with By("converting datetime to string"):
                        dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")

                    if in_normal_range(dt):
                        with And("DateTime64 fits the normal range, so the result is checked as well"):
                            expected = f"{dt.strftime('%Y-%m-%d')}"

                    with Given("I make a query string for ClickHouse"):
                        if cast:
                            query = f"SELECT CAST(toDateTime64('{dt_str}', 0, '{tz}'), 'Date')"
                        else:
                            query = f"SELECT toDate(toDateTime64('{dt_str}', 0, '{tz}'))"

                    with Then("I execute toDate() query and check the result/exitcode"):
                        exec_query(request=query, expected=expected, exitcode=0)

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDateTime("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_datetime(self, cast):
    """Check the toDateTime() conversion with DateTime64. This is supposed to work in the normal range ONLY."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with By("converting datetime to string"):
                    dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                with And("making a query string for ClickHouse"):
                    if cast:
                        query = f"SELECT CAST(toDateTime64('{dt_str}', 0, '{tz}'), 'DateTime')"
                        with When("figuring out the expected result in python"):
                            # CAST renders in the local timezone of the run, which
                            # is assumed to match the server timezone in this setup.
                            dt_local = pytz.timezone(tz).localize(dt)
                            dt_transformed = dt_local.astimezone(tzlocal())
                            expected = f"{dt_transformed.strftime('%Y-%m-%d %H:%M:%S')}"
                    else:
                        query = f"SELECT toDateTime(toDateTime64('{dt_str}', 0, '{tz}'))"
                        with When("figuring out the expected result in python"):
                            expected = f"{dt.strftime('%Y-%m-%d %H:%M:%S')}"

                if not in_normal_range(dt):
                    with When("I execute toDateTime() query out of the normal range, checking only the exitcode"):
                        exec_query(request=query, exitcode=0)
                else:
                    with When("I execute toDateTime() query"):
                        exec_query(request=query, expected=f"{expected}")

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toString("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_string(self, cast):
    """Check toString() with the DateTime64 extended range."""
    stress = self.context.stress
    timezones = timezones_range(stress)

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for dt in datetimes:
            for tz in timezones:
                with Step(f"{dt} {tz}"):
                    with By("converting datetime to string"):
                        dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                    with When("making a query string for ClickHouse"):
                        if cast:
                            query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'String')"
                        else:
                            query = f"SELECT toString(toDateTime64('{dt_str}', 0, '{tz}'))"
                    with Then("I execute toString() query"):
                        exec_query(request=query, expected=f"{dt_str}")

def valid_decimal_range(bit_depth, S):
    """A helper to compute the valid range bound for Decimal(32|64|128|256) with the given scale (S)."""
    # Return the positive bound; the valid range is the open interval (-bound, bound).
    # (The original returned negative values, which made the later range check always false.)
    return {32: 10 ** (9 - S), 64: 10 ** (18 - S), 128: 10 ** (38 - S), 256: 10 ** (76 - S)}[bit_depth]
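For illustration, the bounds this helper produces for a couple of (bit_depth, S) pairs, matching the Decimal ranges listed in the next function's docstring:

    # valid_decimal_range(32, 0) -> 10**9, so Decimal32(0) holds (-10**9, 10**9).
    # valid_decimal_range(64, 18) -> 1, so Decimal64(18) only holds fractions.
    assert valid_decimal_range(bit_depth=32, S=0) == 10 ** 9
    assert valid_decimal_range(bit_depth=64, S=18) == 1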

@TestOutline(Scenario)
@Examples("cast", [
    (False, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toDecimal_32_64_128_256_("1.0"))),
    (True, Requirements(RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_CAST_x_T_("1.0")))
])
def to_decimal_32_64_128_256(self, cast):
    """Check the toDecimal(32|64|128|256) functions with the DateTime64 extended range.
    Decimal32(S) - ( -1 * 10^(9 - S), 1 * 10^(9 - S) )
    Decimal64(S) - ( -1 * 10^(18 - S), 1 * 10^(18 - S) )
    Decimal128(S) - ( -1 * 10^(38 - S), 1 * 10^(38 - S) )
    Decimal256(S) - ( -1 * 10^(76 - S), 1 * 10^(76 - S) )
    """
    stress = self.context.stress
    timezones = timezones_range(stress)
    scales = {32: 9, 64: 18, 128: 38, 256: 76}

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                for decimal_type in (32, 64, 128, 256):
                    for scale in range(scales[decimal_type]):
                        with When(f"{dt} {tz}, Decimal{decimal_type}({scale})"):
                            valid_range = valid_decimal_range(bit_depth=decimal_type, S=scale)
                            with By("computing the expected result using python"):
                                expected = decimal.Decimal(time.mktime(dt.timetuple()))
                            if -valid_range < expected < valid_range:
                                with And("converting datetime to string"):
                                    dt_str = dt.strftime("%Y-%m-%d %H:%M:%S")
                                with When("making a query string for ClickHouse"):
                                    if cast:
                                        query = f"SELECT cast(toDateTime64('{dt_str}', 0, '{tz}'), 'Decimal({decimal_type}, {scale})')"
                                    else:
                                        # toDecimalN() takes the scale as its second argument.
                                        query = f"SELECT toDecimal{decimal_type}(toDateTime64('{dt_str}', 0, '{tz}'), {scale})"
                                with Then(f"I execute toDecimal{decimal_type}() query"):
                                    exec_query(request=query, expected=f"{expected}")

@TestOutline
def to_unix_timestamp64_milli_micro_nano(self, scale):
    """Check the toUnixTimestamp64[Milli/Micro/Nano] functions with the DateTime64 extended range.
    :param scale: 3 for milli, 6 for micro, 9 for nano; int
    """
    stress = self.context.stress
    timezones = timezones_range(stress)
    func = {3: 'Milli', 6: 'Micro', 9: 'Nano'}

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                with When(f"{dt} {tz}"):
                    with By("converting datetime to string"):
                        dt_str = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
                    with And("converting DateTime to UTC"):
                        dt = dt.astimezone(pytz.timezone('UTC'))
                    with And("computing the expected result using python"):
                        # Scale the whole seconds only and add the sub-second part
                        # as an integer; scaling the full float timestamp would
                        # count the fraction twice and lose precision at scale 9.
                        expected = int(dt.timestamp()) * (10**scale)
                        if expected >= 0:
                            expected += dt.microsecond * 10 ** scale // 10 ** 6
                        else:
                            expected -= dt.microsecond * 10 ** scale // 10 ** 6
                    with When("making a query string for ClickHouse"):
                        query = f"SELECT toUnixTimestamp64{func[scale]}(toDateTime64('{dt_str}', {scale}, '{tz}'))"
                    with Then(f"I execute toUnixTimestamp64{func[scale]}() query"):
                        exec_query(request=query, expected=f"{expected}")
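A worked instance of that expectation for scale=3 (illustration only; the sample datetime is arbitrary):

    import datetime
    import pytz

    dt = pytz.utc.localize(datetime.datetime(2000, 1, 1, 0, 0, 0, 123000))
    expected = int(dt.timestamp()) * 10**3 + dt.microsecond * 10**3 // 10**6
    print(expected)  # 946684800123
    # Matching query:
    # SELECT toUnixTimestamp64Milli(toDateTime64('2000-01-01 00:00:00.123000', 3, 'UTC'))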

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Milli("1.0")
)
def to_unix_timestamp64_milli(self):
    """Check the toUnixTimestamp64Milli function with the DateTime64 extended range."""
    to_unix_timestamp64_milli_micro_nano(scale=3)


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Micro("1.0")
)
def to_unix_timestamp64_micro(self):
    """Check the toUnixTimestamp64Micro function with the DateTime64 extended range."""
    to_unix_timestamp64_milli_micro_nano(scale=6)


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_toUnixTimestamp64Nano("1.0")
)
def to_unix_timestamp64_nano(self):
    """Check the toUnixTimestamp64Nano function with the DateTime64 extended range."""
    to_unix_timestamp64_milli_micro_nano(scale=9)
@TestOutline
def from_unix_timestamp64_milli_micro_nano(self, scale):
    """Check the fromUnixTimestamp64[Milli/Micro/Nano] functions with the DateTime64 extended range.
    :param scale: 3 for milli, 6 for micro, 9 for nano; int
    """
    stress = self.context.stress
    timezones = timezones_range(stress)
    func = {3: 'Milli', 6: 'Micro', 9: 'Nano'}

    for year in years_range(stress):
        with Given("I select datetimes in a year"):
            datetimes = select_dates_in_year(year=year, stress=stress, microseconds=True)

        for d in datetimes:
            for tz in timezones:
                dt = pytz.timezone(tz).localize(d)
                with When(f"{dt} {tz}"):
                    with By("converting datetime to string"):
                        # Pad the fractional part with zeros up to the target scale.
                        d_str = d.strftime("%Y-%m-%d %H:%M:%S.%f")
                        d_str += "0" * (scale - 3)
                    with And("converting DateTime64 to UTC"):
                        dt = dt.astimezone(pytz.timezone('UTC'))
                    with And("computing the expected result using python"):
                        # Same tick computation as in the toUnixTimestamp64* outline.
                        ts = int(dt.timestamp()) * (10**scale)
                        if ts >= 0:
                            ts += dt.microsecond * 10 ** scale // 10 ** 6
                        else:
                            ts -= dt.microsecond * 10 ** scale // 10 ** 6
                    with And("making a query string for ClickHouse"):
                        query = f"SELECT fromUnixTimestamp64{func[scale]}(CAST({ts}, 'Int64'), '{tz}')"
                    with Then(f"I execute fromUnixTimestamp64{func[scale]}() query"):
                        exec_query(request=query, expected=f"{d_str}")

@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Milli("1.0")
)
def from_unix_timestamp64_milli(self):
    """Check the fromUnixTimestamp64Milli function with the DateTime64 extended range."""
    from_unix_timestamp64_milli_micro_nano(scale=3)


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Micro("1.0")
)
def from_unix_timestamp64_micro(self):
    """Check the fromUnixTimestamp64Micro function with the DateTime64 extended range."""
    from_unix_timestamp64_milli_micro_nano(scale=6)


@TestScenario
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions_fromUnixTimestamp64Nano("1.0")
)
def from_unix_timestamp64_nano(self):
    """Check the fromUnixTimestamp64Nano function with the DateTime64 extended range."""
    from_unix_timestamp64_milli_micro_nano(scale=9)


@TestFeature
@Requirements(
    RQ_SRS_010_DateTime64_ExtendedRange_TypeConversionFunctions("1.0")
)
def type_conversion(self, node="clickhouse1"):
    """Check the type conversion operations with DateTime64. CAST is listed
    as a requirement on each outline since the module tests exactly what CAST does.
    """
    self.context.node = self.context.cluster.node(node)

    for scenario in loads(current_module(), Scenario):
        Scenario(run=scenario, flags=TE)
@ -278,7 +278,7 @@ class Cluster(object):
        if not os.path.exists(docker_compose_file_path):
            raise TypeError(f"docker compose file '{docker_compose_file_path}' does not exist")

        self.docker_compose += f" --no-ansi --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\""
        self.docker_compose += f" --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\""
        self.lock = threading.Lock()

    def shell(self, node, timeout=300):
@ -19,7 +19,8 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
    # Feature(test=load("rbac.regression", "regression"))(**args)
    # Feature(test=load("aes_encryption.regression", "regression"))(**args)
    Feature(test=load("map_type.regression", "regression"))(**args)
    # Feature(test=load("window_functions.regression", "regression"))(**args)
    Feature(test=load("window_functions.regression", "regression"))(**args)
    # Feature(test=load("datetime64_extended_range.regression", "regression"))(**args)
    # Feature(test=load("kerberos.regression", "regression"))(**args)

if main():
@ -202,7 +202,13 @@ All the updates are tracked using the [Revision History].

## Introduction

This software requirements specification covers requirements for `Map(key, value)` data type in [ClickHouse].
This software requirements specification covers requirements for supporting window functions in [ClickHouse].
Similar functionality exists in [MySQL] and [PostgreSQL]. [PostgreSQL] defines a window function as follows:

> A window function performs a calculation across a set of table rows that are somehow related to the current row.
> This is comparable to the type of calculation that can be done with an aggregate function.
> But unlike regular aggregate functions, use of a window function does not cause rows to
> become grouped into a single output row — the rows retain their separate identities.

## Requirements

@ -2290,3 +2296,5 @@ version: 1.0

[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/window_functions/requirements/requirements.md
[Git]: https://git-scm.com/
[GitHub]: https://github.com
[PostgreSQL]: https://www.postgresql.org/docs/9.2/tutorial-window.html
[MySQL]: https://dev.mysql.com/doc/refman/8.0/en/window-functions.html
@ -1,6 +1,6 @@
# These requirements were auto generated
# from software requirements specification (SRS)
# document by TestFlows v1.6.210312.1172513.
# document by TestFlows v1.6.210505.1133630.
# Do not edit by hand but re-generate instead
# using 'tfs requirements generate' command.
from testflows.core import Specification

@ -3796,7 +3796,13 @@ All the updates are tracked using the [Revision History].

## Introduction

This software requirements specification covers requirements for `Map(key, value)` data type in [ClickHouse].
This software requirements specification covers requirements for supporting window functions in [ClickHouse].
Similar functionality exists in [MySQL] and [PostgreSQL]. [PostgreSQL] defines a window function as follows:

> A window function performs a calculation across a set of table rows that are somehow related to the current row.
> This is comparable to the type of calculation that can be done with an aggregate function.
> But unlike regular aggregate functions, use of a window function does not cause rows to
> become grouped into a single output row — the rows retain their separate identities.

## Requirements

@ -5884,4 +5890,6 @@ version: 1.0

[Revision History]: https://github.com/ClickHouse/ClickHouse/commits/master/tests/testflows/window_functions/requirements/requirements.md
[Git]: https://git-scm.com/
[GitHub]: https://github.com
[PostgreSQL]: https://www.postgresql.org/docs/9.2/tutorial-window.html
[MySQL]: https://dev.mysql.com/doc/refman/8.0/en/window-functions.html
''')
@ -100,7 +100,7 @@ def t1_table(self, name="t1", distributed=False):
|
||||
create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")
|
||||
|
||||
with And("a distributed table"):
|
||||
sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
|
||||
sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, f1 % toUInt8(getMacro('shard')))"
|
||||
table = create_table(name=name, statement=sql)
|
||||
|
||||
with And("populating table with data"):
|
||||
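The same change from rand() to a `<key> % toUInt8(getMacro('shard'))` sharding key repeats for each fixture table below. The apparent intent (an inference, not stated in the commit) is to make row placement deterministic so distributed window-function results are reproducible across runs. A standalone sketch of the difference, under a hypothetical two-shard setup with a per-node `shard` macro:

    # Hypothetical illustration: deterministic sharding key vs rand().
    name = "t1"
    # Before: rows scattered randomly on every insert.
    sql_random = ("CREATE TABLE {name} AS " + name + "_source"
                  + " ENGINE = Distributed(sharded_cluster, default, "
                  + f"{name + '_source'}, rand())")
    # After: for inserts issued from a given node, getMacro('shard') is a
    # constant, so a row with a given f1 always lands on the same shard.
    sql_deterministic = ("CREATE TABLE {name} AS " + name + "_source"
                         + " ENGINE = Distributed(sharded_cluster, default, "
                         + f"{name + '_source'}, f1 % toUInt8(getMacro('shard')))")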
@ -155,7 +155,7 @@ def datetimes_table(self, name="datetimes", distributed=False):
        create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

    with And("a distributed table"):
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, id % toUInt8(getMacro('shard')))"
        table = create_table(name=name, statement=sql)

    with And("populating table with data"):
@ -213,7 +213,7 @@ def numerics_table(self, name="numerics", distributed=False):
        create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

    with And("a distributed table"):
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, id % toUInt8(getMacro('shard')))"
        table = create_table(name=name, statement=sql)

    with And("populating table with data"):
@ -282,7 +282,7 @@ def tenk1_table(self, name="tenk1", distributed=False):
        create_table(name=name + '_source', statement=sql, on_cluster="sharded_cluster")

    with And("a distributed table"):
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, unique1 % toUInt8(getMacro('shard')))"
        table = create_table(name=name, statement=sql)

    with And("populating table with data"):
@ -353,7 +353,7 @@ def empsalary_table(self, name="empsalary", distributed=False):
        create_table(name=name + "_source", statement=sql, on_cluster="sharded_cluster")

    with And("a distributed table"):
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, rand())"
        sql = "CREATE TABLE {name} AS " + name + '_source' + " ENGINE = Distributed(sharded_cluster, default, " + f"{name + '_source'}, empno % toUInt8(getMacro('shard')))"
        table = create_table(name=name, statement=sql)

    with And("populating distributed table with data"):