Merge branch 'master' into memory-overcommit-free

commit 7571387a6e
Authored by mergify[bot] on 2022-04-26 10:15:18 +00:00, committed by GitHub
147 changed files with 5529 additions and 596 deletions

View File

@ -1,24 +1,8 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
"on":
# push:
# branches: [ master ]
# pull_request:
# # The branches below must be a subset of the branches above
# branches: [ master ]
schedule:
- cron: '0 */6 * * *'
- cron: '0 0 * * *'
workflow_dispatch:
env:
@ -38,8 +22,6 @@ jobs:
fail-fast: false
matrix:
language: ['cpp']
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Checkout repository
@ -47,27 +29,14 @@ jobs:
with:
submodules: 'true'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
# - name: Autobuild
# uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
- name: Build
run: |
sudo apt-get install -yq git cmake python ninja-build
sudo apt-get install -yq ninja-build
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
mkdir build
cd build

View File

@ -328,23 +328,9 @@ if (ENABLE_BUILD_PROFILING)
endif ()
endif ()
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
# We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "gnu++2a")
else ()
set (_CXX_STANDARD "c++2a")
endif ()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
else ()
set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
set (CMAKE_CXX_STANDARD_REQUIRED ON)
endif ()
set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
set (CMAKE_CXX_STANDARD_REQUIRED ON)
set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON)

View File

@ -2,10 +2,6 @@
option (ENABLE_CLANG_TIDY "Use clang-tidy static analyzer" OFF)
if (ENABLE_CLANG_TIDY)
if (${CMAKE_VERSION} VERSION_LESS "3.6.0")
message(FATAL_ERROR "clang-tidy requires CMake version at least 3.6.")
endif()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-13" "clang-tidy-12" "clang-tidy-11" "clang-tidy-10" "clang-tidy-9" "clang-tidy-8")
if (CLANG_TIDY_PATH)

View File

@ -1,22 +1,15 @@
if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386")
message (FATAL_ERROR "32bit platforms are not supported")
endif ()
set (ARCH_AMD64 1)
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
set (ARCH_AARCH64 1)
endif ()
if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
set (ARCH_ARM 1)
endif ()
if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "i386")
set (ARCH_I386 1)
endif ()
if ((ARCH_ARM AND NOT ARCH_AARCH64) OR ARCH_I386)
message (FATAL_ERROR "32bit platforms are not supported")
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
endif ()
if (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")
set (ARCH_RISCV64 1)
else ()
message (FATAL_ERROR "Platform ${CMAKE_SYSTEM_PROCESSOR} is not supported")
endif ()

View File

@ -61,7 +61,7 @@ else ()
endif ()
if (ARCH_PPC64LE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
endif ()
set (TEST_FLAG "-msse4.2")

View File

@ -13,7 +13,7 @@ execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version)
if (COMPILER_GCC)
# Require minimum version of gcc
set (GCC_MINIMUM_VERSION 11)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.")
endif ()

View File

@ -17,7 +17,7 @@ services:
- type: ${keeper_fs:-tmpfs}
source: ${keeper_db_dir1:-}
target: /var/lib/clickhouse-keeper
entrypoint: "${keeper_cmd_prefix:-} --config=/etc/clickhouse-keeper/keeper_config1.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config1.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
cap_add:
- SYS_PTRACE
- NET_ADMIN
@ -47,7 +47,7 @@ services:
- type: ${keeper_fs:-tmpfs}
source: ${keeper_db_dir2:-}
target: /var/lib/clickhouse-keeper
entrypoint: "${keeper_cmd_prefix:-} --config=/etc/clickhouse-keeper/keeper_config2.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config2.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
cap_add:
- SYS_PTRACE
- NET_ADMIN
@ -77,7 +77,7 @@ services:
- type: ${keeper_fs:-tmpfs}
source: ${keeper_db_dir3:-}
target: /var/lib/clickhouse-keeper
entrypoint: "${keeper_cmd_prefix:-} --config=/etc/clickhouse-keeper/keeper_config3.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config3.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
cap_add:
- SYS_PTRACE
- NET_ADMIN

View File

@ -75,7 +75,7 @@ This will create the `programs/clickhouse` executable, which can be used with `c
The build requires the following components:
- Git (used only to check out the sources; it's not needed for the build)
- CMake 3.10 or newer
- CMake 3.14 or newer
- Ninja
- C++ compiler: clang-13 or newer
- Linker: lld

View File

@ -5,6 +5,8 @@ sidebar_label: Boolean
# Boolean Values {#boolean-values}
There is no separate type for boolean values. Use UInt8 type, restricted to the values 0 or 1.
Since https://github.com/ClickHouse/ClickHouse/commit/4076ae77b46794e73594a9f400200088ed1e7a6e, there is a separate type for boolean values.
For versions before that, there is no separate type for boolean values. Use UInt8 type, restricted to the values 0 or 1.
[Original article](https://clickhouse.com/docs/en/data_types/boolean/) <!--hide-->

View File

@ -375,7 +375,7 @@ Result:
- [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) server configuration parameter.
## toStartOfFiveMinute {#tostartoffiveminute}
## toStartOfFiveMinutes {#tostartoffiveminutes}
Rounds down a date with time to the start of the five-minute interval.

View File

@ -384,7 +384,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
- Server time zone: the [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) configuration parameter.
## toStartOfFiveMinute {#tostartoffiveminute}
## toStartOfFiveMinutes {#tostartoffiveminutes}
Rounds a date with time down to the start of the five-minute interval.

View File

@ -322,8 +322,9 @@ def process_benchmark_results(args):
required_keys = {
"dbms": ["result"],
"hardware": ["result", "system", "system_full", "kind"],
"versions": ["version", "system"],
}
for benchmark_kind in ["dbms", "hardware"]:
for benchmark_kind in ["dbms", "hardware", "versions"]:
results = []
results_root = os.path.join(benchmark_root, benchmark_kind, "results")
for result in sorted(os.listdir(results_root)):

View File

@ -1,3 +1,5 @@
# Boolean Values {#boolean-values}
There is no separate type for storing boolean values. Use the UInt8 type, restricted to the values 0 or 1.
Since https://github.com/ClickHouse/ClickHouse/commit/4076ae77b46794e73594a9f400200088ed1e7a6e, there is a separate type for storing boolean values.
For versions before that, there is no separate type for storing boolean values. Use the UInt8 type, restricted to the values 0 or 1.

View File

@ -227,7 +227,7 @@ SELECT toStartOfSecond(dt64, 'Asia/Istanbul');
- The [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) server configuration option.
## toStartOfFiveMinute {#tostartoffiveminute}
## toStartOfFiveMinutes {#tostartoffiveminutes}
Rounds a DateTime down to the start of the five-minute interval.

View File

@ -18,9 +18,9 @@ conflicts:
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
description: |
Client binary for ClickHouse
ClickHouse is a column-oriented database management system
ClickHouse is a column-oriented database management system.
that allows generating analytical data reports in real time.
This package provides clickhouse-client , clickhouse-local and clickhouse-benchmark
This package provides clickhouse-client, clickhouse-local and clickhouse-benchmark.
overrides:
deb:

View File

@ -15,6 +15,17 @@ shopt -s extglob
export _CLICKHOUSE_COMPLETION_LOADED=1
CLICKHOUSE_logs_level=(
none
fatal
error
warning
information
debug
trace
test
)
CLICKHOUSE_QueryProcessingStage=(
complete
fetch_columns
@ -113,6 +124,10 @@ function _complete_for_clickhouse_generic_bin_impl()
COMPREPLY=( $(compgen -W "${CLICKHOUSE_QueryProcessingStage[*]}" -- "$cur") )
return 1
;;
--send_logs_level)
COMPREPLY=( $(compgen -W "${CLICKHOUSE_logs_level[*]}" -- "$cur") )
return 1
;;
--format|--input-format|--output-format)
COMPREPLY=( $(compgen -W "${CLICKHOUSE_Format[*]}" -- "$cur") )
return 1

View File

@ -1507,7 +1507,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
}
#if defined(OS_LINUX)
if (!TasksStatsCounters::checkIfAvailable())
auto tasks_stats_provider = TasksStatsCounters::findBestAvailableProvider();
if (tasks_stats_provider == TasksStatsCounters::MetricsProvider::None)
{
LOG_INFO(log, "It looks like this system does not have procfs mounted at /proc location,"
" neither clickhouse-server process has CAP_NET_ADMIN capability."
@ -1518,6 +1519,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.",
executable_path);
}
else
{
LOG_INFO(log, "Tasks stats provider: {}", TasksStatsCounters::metricsProviderString(tasks_stats_provider));
}
if (!hasLinuxCapability(CAP_SYS_NICE))
{

View File

@ -1234,6 +1234,7 @@ void ClientBase::sendDataFrom(ReadBuffer & buf, Block & sample, const ColumnsDes
}
void ClientBase::sendDataFromPipe(Pipe&& pipe, ASTPtr parsed_query, bool have_more_data)
try
{
QueryPipeline pipeline(std::move(pipe));
PullingAsyncPipelineExecutor executor(pipeline);
@ -1266,6 +1267,12 @@ void ClientBase::sendDataFromPipe(Pipe&& pipe, ASTPtr parsed_query, bool have_mo
if (!have_more_data)
connection->sendData({}, "", false);
}
catch (...)
{
connection->sendCancel();
receiveEndOfQuery();
throw;
}
void ClientBase::sendDataFromStdin(Block & sample, const ColumnsDescription & columns_description, ASTPtr parsed_query)
{
@ -1406,7 +1413,15 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
apply_query_settings(*with_output->settings_ast);
if (!connection->checkConnected())
{
auto poco_logs_level = Poco::Logger::parseLevel(config().getString("send_logs_level", "none"));
/// Print under WARNING also because it is used by clickhouse-test.
if (poco_logs_level >= Poco::Message::PRIO_WARNING)
{
fmt::print(stderr, "Connection lost. Reconnecting.\n");
}
connect();
}
ASTPtr input_function;
if (insert && insert->select)
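The sendDataFromPipe change above puts `try` directly after the function signature, a C++ function-try-block, so the single catch covers the whole body and can cancel the query before rethrowing. A minimal illustration of the construct; the Sender class and its members are invented for this example and are not part of the codebase:

```cpp
#include <iostream>
#include <stdexcept>

struct Sender
{
    void cancel() { std::cout << "cancel sent\n"; }

    /// Function-try-block: the try/catch wraps the entire function body,
    /// so any throw inside send() reaches the handler below.
    void send(bool fail)
    try
    {
        if (fail)
            throw std::runtime_error("stream broken");
        std::cout << "data sent\n";
    }
    catch (...)
    {
        cancel();   /// undo protocol state first
        throw;      /// rethrow so the caller still observes the failure
    }
};

int main()
{
    Sender s;
    s.send(false);
    try
    {
        s.send(true);
    }
    catch (const std::exception & e)
    {
        std::cout << "caught: " << e.what() << '\n';
    }
}
```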

View File

@ -377,9 +377,10 @@ bool Connection::ping()
{
// LOG_TRACE(log_wrapper.get(), "Ping");
TimeoutSetter timeout_setter(*socket, sync_request_timeout, true);
try
{
TimeoutSetter timeout_setter(*socket, sync_request_timeout, true);
UInt64 pong = 0;
writeVarUInt(Protocol::Client::Ping, *out);
out->next();
@ -405,6 +406,10 @@ bool Connection::ping()
}
catch (const Poco::Exception & e)
{
/// Explicitly disconnect since ping() can receive EndOfStream,
/// and in this case this ping() will return false,
/// while next ping() may return true.
disconnect();
LOG_TRACE(log_wrapper.get(), fmt::runtime(e.displayText()));
return false;
}
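The ping() change above spells out why the catch handler now disconnects: a ping that fails half-way must not leave the connection looking healthy for the next call. A generic sketch of that reset-on-failure pattern, with a hypothetical Channel class standing in for the real connection:

```cpp
#include <iostream>
#include <stdexcept>

/// Hypothetical stand-in for a network connection.
class Channel
{
public:
    bool ping(bool simulate_error)
    {
        try
        {
            if (!connected)
                connect();
            if (simulate_error)
                throw std::runtime_error("unexpected end of stream");
            return true;
        }
        catch (const std::exception & e)
        {
            /// Drop the half-broken connection so a later ping() starts from
            /// a clean state instead of reusing a desynchronized stream.
            disconnect();
            std::cout << "ping failed: " << e.what() << '\n';
            return false;
        }
    }

private:
    void connect() { connected = true; }
    void disconnect() { connected = false; }
    bool connected = false;
};

int main()
{
    Channel ch;
    std::cout << ch.ping(false) << '\n';  /// 1
    std::cout << ch.ping(true) << '\n';   /// 0, and the channel is reset
    std::cout << ch.ping(false) << '\n';  /// 1 again, via a fresh connect()
}
```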

View File

@ -565,7 +565,7 @@ public:
/// NOTE: Assuming timezone offset is a multiple of 15 minutes.
inline Time toStartOfMinute(Time t) const { return toStartOfMinuteInterval(t, 1); }
inline Time toStartOfFiveMinute(Time t) const { return toStartOfMinuteInterval(t, 5); }
inline Time toStartOfFiveMinutes(Time t) const { return toStartOfMinuteInterval(t, 5); }
inline Time toStartOfFifteenMinutes(Time t) const { return toStartOfMinuteInterval(t, 15); }
inline Time toStartOfTenMinutes(Time t) const { return toStartOfMinuteInterval(t, 10); }
inline Time toStartOfHour(Time t) const { return roundDown(t, 3600); }

View File

@ -555,7 +555,7 @@ void LRUFileCache::remove(const Key & key)
fs::remove(key_path);
}
void LRUFileCache::tryRemoveAll()
void LRUFileCache::remove(bool force_remove_unreleasable)
{
/// Try remove all cached files by cache_base_path.
/// Only releasable file segments are evicted.
@ -567,12 +567,13 @@ void LRUFileCache::tryRemoveAll()
auto & [key, offset] = *it++;
auto * cell = getCell(key, offset, cache_lock);
if (cell->releasable())
if (cell->releasable() || force_remove_unreleasable)
{
auto file_segment = cell->file_segment;
if (file_segment)
{
std::lock_guard<std::mutex> segment_lock(file_segment->mutex);
file_segment->detached = true;
remove(file_segment->key(), file_segment->offset(), cache_lock, segment_lock);
}
}

View File

@ -42,7 +42,7 @@ public:
virtual void remove(const Key & key) = 0;
virtual void tryRemoveAll() = 0;
virtual void remove(bool force_remove_unreleasable) = 0;
static bool isReadOnly();
@ -145,7 +145,7 @@ public:
void remove(const Key & key) override;
void tryRemoveAll() override;
void remove(bool force_remove_unreleasable) override;
std::vector<String> tryGetCachePaths(const Key & key) override;

View File

@ -455,6 +455,8 @@ void FileSegment::complete(State state)
std::lock_guard cache_lock(cache->mutex);
std::lock_guard segment_lock(mutex);
assertNotDetached();
bool is_downloader = isDownloaderImpl(segment_lock);
if (!is_downloader)
{
@ -477,8 +479,6 @@ void FileSegment::complete(State state)
download_state = state;
assertNotDetached();
try
{
completeImpl(cache_lock, segment_lock);

View File

@ -296,11 +296,7 @@ PoolWithFailoverBase<TNestedPool>::getMany(
"All connection tries failed. Log: \n\n" + fail_messages + "\n",
DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED);
try_results.erase(
std::remove_if(
try_results.begin(), try_results.end(),
[](const TryResult & r) { return r.entry.isNull() || !r.is_usable; }),
try_results.end());
std::erase_if(try_results, [](const TryResult & r) { return r.entry.isNull() || !r.is_usable; });
/// Sort so that preferred items are near the beginning.
std::stable_sort(
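The hunk above is one of many in this commit that replace the pre-C++20 erase(remove_if(...)) idiom with std::erase_if. A minimal standalone sketch of the two equivalent forms; the vector and predicate here are made up for illustration, not taken from the codebase:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> values{1, 2, 3, 4, 5, 6};

    /// Pre-C++20 erase-remove idiom: remove_if only shifts the kept elements
    /// to the front; the erase() call trims the leftover tail.
    values.erase(
        std::remove_if(values.begin(), values.end(), [](int v) { return v % 2 == 0; }),
        values.end());

    /// C++20: one call does both steps and returns the number of erased elements.
    std::vector<int> other{1, 2, 3, 4, 5, 6};
    auto erased = std::erase_if(other, [](int v) { return v % 2 == 0; });

    std::cout << values.size() << " kept, " << erased << " erased\n";
}
```

The single-call form also removes a classic bug: passing only one iterator to erase() in the old idiom silently erases a single element instead of the whole tail.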

View File

@ -265,26 +265,24 @@ void TaskStatsInfoGetter::getStat(::taskstats & out_stats, pid_t tid) const
{
NetlinkMessage answer = query(netlink_socket_fd, taskstats_family_id, tid, TASKSTATS_CMD_GET, TASKSTATS_CMD_ATTR_PID, &tid, sizeof(tid));
for (const NetlinkMessage::Attribute * attr = &answer.payload.attribute;
attr < answer.end();
attr = attr->next())
{
if (attr->header.nla_type == TASKSTATS_TYPE_AGGR_TGID || attr->header.nla_type == TASKSTATS_TYPE_AGGR_PID)
{
for (const NetlinkMessage::Attribute * nested_attr = reinterpret_cast<const NetlinkMessage::Attribute *>(attr->payload);
nested_attr < attr->next();
nested_attr = nested_attr->next())
{
if (nested_attr->header.nla_type == TASKSTATS_TYPE_STATS)
{
out_stats = unalignedLoad<::taskstats>(nested_attr->payload);
return;
}
}
}
}
const NetlinkMessage::Attribute * attr = &answer.payload.attribute;
if (attr->header.nla_type != TASKSTATS_TYPE_AGGR_PID)
throw Exception("Expected TASKSTATS_TYPE_AGGR_PID", ErrorCodes::NETLINK_ERROR);
throw Exception("There is no TASKSTATS_TYPE_STATS attribute in the Netlink response", ErrorCodes::NETLINK_ERROR);
/// TASKSTATS_TYPE_AGGR_PID
const NetlinkMessage::Attribute * nested_attr = reinterpret_cast<const NetlinkMessage::Attribute *>(attr->payload);
if (nested_attr->header.nla_type != TASKSTATS_TYPE_PID)
throw Exception("Expected TASKSTATS_TYPE_PID", ErrorCodes::NETLINK_ERROR);
if (nested_attr == nested_attr->next())
throw Exception("No TASKSTATS_TYPE_STATS packet after TASKSTATS_TYPE_PID", ErrorCodes::NETLINK_ERROR);
nested_attr = nested_attr->next();
if (nested_attr->header.nla_type != TASKSTATS_TYPE_STATS)
throw Exception("Expected TASKSTATS_TYPE_STATS", ErrorCodes::NETLINK_ERROR);
out_stats = unalignedLoad<::taskstats>(nested_attr->payload);
if (attr->next() != answer.end())
throw Exception("Unexpected end of response", ErrorCodes::NETLINK_ERROR);
}

View File

@ -10,6 +10,11 @@ namespace DB
{
/// Get taskstat info from OS kernel via Netlink protocol.
///
/// NOTE: unlike the procfs interface, the netlink interface rounds some values to KiBs [1].
///
/// [1]: https://elixir.bootlin.com/linux/v5.18-rc4/source/kernel/tsacct.c#L101
///
class TaskStatsInfoGetter : private boost::noncopyable
{
public:

View File

@ -67,6 +67,20 @@ namespace ProfileEvents
namespace DB
{
const char * TasksStatsCounters::metricsProviderString(MetricsProvider provider)
{
switch (provider)
{
case MetricsProvider::None:
return "none";
case MetricsProvider::Procfs:
return "procfs";
case MetricsProvider::Netlink:
return "netlink";
}
__builtin_unreachable();
}
bool TasksStatsCounters::checkIfAvailable()
{
return findBestAvailableProvider() != MetricsProvider::None;

View File

@ -176,7 +176,17 @@ extern PerfEventsCounters current_thread_counters;
class TasksStatsCounters
{
public:
enum class MetricsProvider
{
None,
Procfs,
Netlink,
};
static const char * metricsProviderString(MetricsProvider provider);
static bool checkIfAvailable();
static MetricsProvider findBestAvailableProvider();
static std::unique_ptr<TasksStatsCounters> create(UInt64 tid);
void reset();
@ -186,16 +196,8 @@ private:
::taskstats stats; //-V730_NOINIT
std::function<::taskstats()> stats_getter;
enum class MetricsProvider
{
None,
Procfs,
Netlink
};
explicit TasksStatsCounters(UInt64 tid, MetricsProvider provider);
static MetricsProvider findBestAvailableProvider();
static void incrementProfileEvents(const ::taskstats & prev, const ::taskstats & curr, ProfileEvents::Counters & profile_events);
};

View File

@ -52,7 +52,7 @@ private:
{
std::cerr << "Mutating\n";
auto res = shallowMutate();
res->wrapped = IColumn::mutate(wrapped);
res->wrapped = IColumn::mutate(std::move(res->wrapped).detach());
return res;
}

View File

@ -59,23 +59,20 @@ namespace Format
{
size_t i = 0;
bool should_delete = true;
str.erase(
std::remove_if(
str.begin(),
str.end(),
[&i, &should_delete, &str](char)
std::erase_if(
str,
[&i, &should_delete, &str](char)
{
bool is_double_brace = (str[i] == '{' && str[i + 1] == '{') || (str[i] == '}' && str[i + 1] == '}');
++i;
if (is_double_brace && should_delete)
{
bool is_double_brace = (str[i] == '{' && str[i + 1] == '{') || (str[i] == '}' && str[i + 1] == '}');
++i;
if (is_double_brace && should_delete)
{
should_delete = false;
return true;
}
should_delete = true;
return false;
}),
str.end());
should_delete = false;
return true;
}
should_delete = true;
return false;
});
};
index_positions.emplace_back();

View File

@ -119,7 +119,7 @@ TEST(DateLUTTest, TimeValuesInMiddleOfRange)
EXPECT_EQ(lut.toSecond(time), 11 /*unsigned*/);
EXPECT_EQ(lut.toMinute(time), 20 /*unsigned*/);
EXPECT_EQ(lut.toStartOfMinute(time), 1568650800 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinute(time), 1568650800 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinutes(time), 1568650800 /*time_t*/);
EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 1568650500 /*time_t*/);
EXPECT_EQ(lut.toStartOfTenMinutes(time), 1568650800 /*time_t*/);
EXPECT_EQ(lut.toStartOfHour(time), 1568649600 /*time_t*/);
@ -181,7 +181,7 @@ TEST(DateLUTTest, TimeValuesAtLeftBoderOfRange)
EXPECT_EQ(lut.toSecond(time), 0 /*unsigned*/);
EXPECT_EQ(lut.toMinute(time), 0 /*unsigned*/);
EXPECT_EQ(lut.toStartOfMinute(time), 0 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinute(time), 0 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinutes(time), 0 /*time_t*/);
EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 0 /*time_t*/);
EXPECT_EQ(lut.toStartOfTenMinutes(time), 0 /*time_t*/);
EXPECT_EQ(lut.toStartOfHour(time), 0 /*time_t*/);
@ -244,7 +244,7 @@ TEST(DateLUTTest, TimeValuesAtRightBoderOfRangeOfOldLUT)
EXPECT_EQ(lut.toMinute(time), 17 /*unsigned*/);
EXPECT_EQ(lut.toSecond(time), 53 /*unsigned*/);
EXPECT_EQ(lut.toStartOfMinute(time), 4294343820 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinute(time), 4294343700 /*time_t*/);
EXPECT_EQ(lut.toStartOfFiveMinutes(time), 4294343700 /*time_t*/);
EXPECT_EQ(lut.toStartOfFifteenMinutes(time), 4294343700 /*time_t*/);
EXPECT_EQ(lut.toStartOfTenMinutes(time), 4294343400 /*time_t*/);
EXPECT_EQ(lut.toStartOfHour(time), 4294342800 /*time_t*/);

View File

@ -152,10 +152,7 @@ ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, Context
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
/// Unset settings
storage_children.erase(
std::remove_if(storage_children.begin(), storage_children.end(),
[&](const ASTPtr & element) { return element.get() == ast_storage->settings; }),
storage_children.end());
std::erase_if(storage_children, [&](const ASTPtr & element) { return element.get() == ast_storage->settings; });
ast_storage->settings = nullptr;
}
auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true,

View File

@ -219,10 +219,10 @@ private:
auto current_box = Box(Point(current_min_x, current_min_y), Point(current_max_x, current_max_y));
Polygon tmp_poly;
bg::convert(current_box, tmp_poly);
possible_ids.erase(std::remove_if(possible_ids.begin(), possible_ids.end(), [&](const auto id)
std::erase_if(possible_ids, [&](const auto id)
{
return !bg::intersects(current_box, polygons[id]);
}), possible_ids.end());
});
int covered = 0;
#ifndef __clang_analyzer__ /// Triggers a warning in boost geometry.
auto it = std::find_if(possible_ids.begin(), possible_ids.end(), [&](const auto id)

View File

@ -467,17 +467,17 @@ struct ToStartOfNanosecondImpl
using FactorTransform = ZeroTransform;
};
struct ToStartOfFiveMinuteImpl
struct ToStartOfFiveMinutesImpl
{
static constexpr auto name = "toStartOfFiveMinute";
static constexpr auto name = "toStartOfFiveMinutes";
static inline UInt32 execute(const DecimalUtils::DecimalComponents<DateTime64> & t, const DateLUTImpl & time_zone)
{
return time_zone.toStartOfFiveMinute(t.whole);
return time_zone.toStartOfFiveMinutes(t.whole);
}
static inline UInt32 execute(UInt32 t, const DateLUTImpl & time_zone)
{
return time_zone.toStartOfFiveMinute(t);
return time_zone.toStartOfFiveMinutes(t);
}
static inline UInt32 execute(Int32, const DateLUTImpl &)
{

View File

@ -27,7 +27,7 @@ void registerFunctionToLastDayOfMonth(FunctionFactory &);
void registerFunctionToStartOfQuarter(FunctionFactory &);
void registerFunctionToStartOfYear(FunctionFactory &);
void registerFunctionToStartOfMinute(FunctionFactory &);
void registerFunctionToStartOfFiveMinute(FunctionFactory &);
void registerFunctionToStartOfFiveMinutes(FunctionFactory &);
void registerFunctionToStartOfTenMinutes(FunctionFactory &);
void registerFunctionToStartOfFifteenMinutes(FunctionFactory &);
void registerFunctionToStartOfHour(FunctionFactory &);
@ -109,7 +109,7 @@ void registerFunctionsDateTime(FunctionFactory & factory)
registerFunctionToStartOfMillisecond(factory);
registerFunctionToStartOfSecond(factory);
registerFunctionToStartOfMinute(factory);
registerFunctionToStartOfFiveMinute(factory);
registerFunctionToStartOfFiveMinutes(factory);
registerFunctionToStartOfTenMinutes(factory);
registerFunctionToStartOfFifteenMinutes(factory);
registerFunctionToStartOfHour(factory);

View File

@ -1,18 +0,0 @@
#include <Functions/FunctionFactory.h>
#include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionDateOrDateTimeToSomething.h>
namespace DB
{
using FunctionToStartOfFiveMinute = FunctionDateOrDateTimeToSomething<DataTypeDateTime, ToStartOfFiveMinuteImpl>;
void registerFunctionToStartOfFiveMinute(FunctionFactory & factory)
{
factory.registerFunction<FunctionToStartOfFiveMinute>();
}
}

View File

@ -0,0 +1,19 @@
#include <Functions/FunctionFactory.h>
#include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionDateOrDateTimeToSomething.h>
namespace DB
{
using FunctionToStartOfFiveMinutes = FunctionDateOrDateTimeToSomething<DataTypeDateTime, ToStartOfFiveMinutesImpl>;
void registerFunctionToStartOfFiveMinutes(FunctionFactory & factory)
{
factory.registerFunction<FunctionToStartOfFiveMinutes>();
factory.registerAlias("toStartOfFiveMinute", FunctionToStartOfFiveMinutes::name);
}
}

View File

@ -1,3 +1,4 @@
#include <base/arithmeticOverflow.h>
#include <Common/DateLUTImpl.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeDate.h>
@ -20,6 +21,7 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int DECIMAL_OVERFLOW;
}
@ -217,7 +219,9 @@ namespace
{
if (scale_multiplier < 1000)
{
Int64 t_milliseconds = t * (static_cast<Int64>(1000) / scale_multiplier);
Int64 t_milliseconds = 0;
if (common::mulOverflow(t, static_cast<Int64>(1000) / scale_multiplier, t_milliseconds))
throw DB::Exception("Numeric overflow", ErrorCodes::DECIMAL_OVERFLOW);
if (likely(t >= 0))
return t_milliseconds / milliseconds * milliseconds;
else
@ -252,7 +256,9 @@ namespace
{
if (scale_multiplier < 1000000)
{
Int64 t_microseconds = t * (static_cast<Int64>(1000000) / scale_multiplier);
Int64 t_microseconds = 0;
if (common::mulOverflow(t, static_cast<Int64>(1000000) / scale_multiplier, t_microseconds))
throw DB::Exception("Numeric overflow", ErrorCodes::DECIMAL_OVERFLOW);
if (likely(t >= 0))
return t_microseconds / microseconds * microseconds;
else
@ -287,7 +293,9 @@ namespace
{
if (scale_multiplier < 1000000000)
{
Int64 t_nanoseconds = t * (static_cast<Int64>(1000000000) / scale_multiplier);
Int64 t_nanoseconds = 0;
if (common::mulOverflow(t, (static_cast<Int64>(1000000000) / scale_multiplier), t_nanoseconds))
throw DB::Exception("Numeric overflow", ErrorCodes::DECIMAL_OVERFLOW);
if (likely(t >= 0))
return t_nanoseconds / nanoseconds * nanoseconds;
else
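The rounding helpers above now guard the scale-multiplier multiplication with common::mulOverflow and throw DECIMAL_OVERFLOW instead of relying on wrap-around. A standalone sketch of the same pattern; checkedMul and the direct use of the GCC/Clang builtin are illustrative assumptions here, not ClickHouse's own helper:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

/// Multiply two Int64 values, throwing instead of wrapping on overflow.
/// __builtin_mul_overflow (GCC/Clang) returns true when the mathematically
/// exact product does not fit into the output type.
int64_t checkedMul(int64_t a, int64_t b)
{
    int64_t result = 0;
    if (__builtin_mul_overflow(a, b, &result))
        throw std::overflow_error("Numeric overflow");
    return result;
}

int main()
{
    std::cout << checkedMul(1'000'000, 1'000) << '\n';   /// fine: 1000000000

    try
    {
        checkedMul(INT64_MAX / 2, 3);                     /// overflows
    }
    catch (const std::overflow_error & e)
    {
        std::cout << "caught: " << e.what() << '\n';
    }
}
```

Throwing on overflow keeps the rounding result well-defined for DateTime64 values near the edges of the representable range, where silent wrap-around would otherwise produce a plausible-looking but wrong timestamp.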

View File

@ -435,8 +435,7 @@ void ActionsDAG::removeUnusedActions(bool allow_remove_inputs, bool allow_consta
}
nodes.remove_if([&](const Node & node) { return !visited_nodes.contains(&node); });
auto it = std::remove_if(inputs.begin(), inputs.end(), [&](const Node * node) { return !visited_nodes.contains(node); });
inputs.erase(it, inputs.end());
std::erase_if(inputs, [&](const Node * node) { return !visited_nodes.contains(node); });
}
static ColumnWithTypeAndName executeActionForHeader(const ActionsDAG::Node * node, ColumnsWithTypeAndName arguments)

View File

@ -221,7 +221,7 @@ DDLTaskPtr DDLWorker::initAndCheckTask(const String & entry_name, String & out_r
static void filterAndSortQueueNodes(Strings & all_nodes)
{
all_nodes.erase(std::remove_if(all_nodes.begin(), all_nodes.end(), [] (const String & s) { return !startsWith(s, "query-"); }), all_nodes.end());
std::erase_if(all_nodes, [] (const String & s) { return !startsWith(s, "query-"); });
::sort(all_nodes.begin(), all_nodes.end());
}

View File

@ -306,12 +306,12 @@ BlockIO InterpreterSystemQuery::execute()
{
auto caches = FileCacheFactory::instance().getAll();
for (const auto & [_, cache_data] : caches)
cache_data.cache->tryRemoveAll();
cache_data.cache->remove(query.force_removal);
}
else
{
auto cache = FileCacheFactory::instance().get(query.filesystem_cache_path);
cache->tryRemoveAll();
cache->remove(query.force_removal);
}
break;
}

View File

@ -192,6 +192,13 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
<< (settings.hilite ? hilite_keyword : "") << " SECOND"
<< (settings.hilite ? hilite_none : "");
}
else if (type == Type::DROP_FILESYSTEM_CACHE)
{
if (!filesystem_cache_path.empty())
settings.ostr << (settings.hilite ? hilite_none : "") << filesystem_cache_path;
if (force_removal)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " FORCE";
}
}

View File

@ -89,7 +89,10 @@ public:
String volume;
String disk;
UInt64 seconds{};
/// Values for `drop filesystem cache` system query.
String filesystem_cache_path;
bool force_removal = false;
String getID(char) const override { return "SYSTEM query"; }

View File

@ -346,6 +346,16 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
res->seconds = seconds->as<ASTLiteral>()->value.get<UInt64>();
break;
}
case Type::DROP_FILESYSTEM_CACHE:
{
ParserLiteral path_parser;
ASTPtr ast;
if (path_parser.parse(pos, ast, expected))
res->filesystem_cache_path = ast->as<ASTLiteral>()->value.safeGet<String>();
if (ParserKeyword{"FORCE"}.ignore(pos, expected))
res->force_removal = true;
break;
}
default:
{

View File

@ -274,9 +274,9 @@ std::string buildTaggedRegex(std::string regexp_str)
std::vector<std::string> tags;
splitInto<';'>(tags, regexp_str);
/* remove empthy elements */
/* remove empty elements */
using namespace std::string_literals;
tags.erase(std::remove(tags.begin(), tags.end(), ""s), tags.end());
std::erase(tags, ""s);
if (tags[0].find('=') == tags[0].npos)
{
if (tags.size() == 1) /* only name */

View File

@ -217,6 +217,14 @@ public:
private:
ColumnsContainer columns;
/// Subcolumns are not nested columns.
///
/// Example of subcolumns:
/// - .size0 for Array
/// - .null for Nullable
///
/// While nested columns have names of the form foo.bar
SubcolumnsContainter subcolumns;
void modifyColumnOrder(const String & column_name, const String & after_column, bool first);

View File

@ -44,12 +44,11 @@ bool injectRequiredColumnsRecursively(
if (alter_conversions.isColumnRenamed(column_name_in_part))
column_name_in_part = alter_conversions.getColumnOldName(column_name_in_part);
auto column_in_part = NameAndTypePair(
column_name_in_part, column_in_storage->getSubcolumnName(),
column_in_storage->getTypeInStorage(), column_in_storage->type);
auto column_in_part = part->getColumns().tryGetByName(column_name_in_part);
/// column has files and hence does not require evaluation
if (part->hasColumnFiles(column_in_part))
if (column_in_part
&& (!column_in_storage->isSubcolumn()
|| column_in_part->type->tryGetSubcolumnType(column_in_storage->getSubcolumnName())))
{
/// ensure each column is added only once
if (!required_columns.contains(column_name))

View File

@ -3676,10 +3676,10 @@ void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String &
parts = getVisibleDataPartsVectorInPartition(local_context, partition_id);
auto disk = getStoragePolicy()->getDiskByName(name);
parts.erase(std::remove_if(parts.begin(), parts.end(), [&](auto part_ptr)
std::erase_if(parts, [&](auto part_ptr)
{
return part_ptr->volume->getDisk()->getName() == disk->getName();
}), parts.end());
});
if (parts.empty())
{
@ -3724,7 +3724,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String
if (parts.empty())
throw Exception("Nothing to move (check that the partition exists).", ErrorCodes::NO_SUCH_DATA_PART);
parts.erase(std::remove_if(parts.begin(), parts.end(), [&](auto part_ptr)
std::erase_if(parts, [&](auto part_ptr)
{
for (const auto & disk : volume->getDisks())
{
@ -3734,7 +3734,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String
}
}
return false;
}), parts.end());
});
if (parts.empty())
{
@ -4220,8 +4220,7 @@ void MergeTreeData::filterVisibleDataParts(DataPartsVector & maybe_visible_parts
return !part->version.isVisible(snapshot_version, current_tid);
};
auto new_end_it = std::remove_if(maybe_visible_parts.begin(), maybe_visible_parts.end(), need_remove_pred);
maybe_visible_parts.erase(new_end_it, maybe_visible_parts.end());
std::erase_if(maybe_visible_parts, need_remove_pred);
[[maybe_unused]] size_t visible_size = maybe_visible_parts.size();
@ -6515,15 +6514,11 @@ ReservationPtr MergeTreeData::balancedReservation(
}
// Remove irrelevant parts.
covered_parts.erase(
std::remove_if(
covered_parts.begin(),
covered_parts.end(),
std::erase_if(covered_parts,
[min_bytes_to_rebalance_partition_over_jbod](const auto & part)
{
return !(part->isStoredOnDisk() && part->getBytesOnDisk() >= min_bytes_to_rebalance_partition_over_jbod);
}),
covered_parts.end());
});
// Include current submerging big parts which are not yet in `currently_submerging_big_parts`
for (const auto & part : covered_parts)

View File

@ -176,11 +176,9 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
return active_parts_set.getContainingPart(part->info) != part->name;
};
auto new_end_it = std::remove_if(active_parts.begin(), active_parts.end(), remove_pred);
active_parts.erase(new_end_it, active_parts.end());
std::erase_if(active_parts, remove_pred);
new_end_it = std::remove_if(outdated_parts.begin(), outdated_parts.end(), remove_pred);
outdated_parts.erase(new_end_it, outdated_parts.end());
std::erase_if(outdated_parts, remove_pred);
std::merge(active_parts.begin(), active_parts.end(),
outdated_parts.begin(), outdated_parts.end(),
@ -640,220 +638,4 @@ size_t MergeTreeDataMergerMutator::estimateNeededDiskSpace(const MergeTreeData::
return static_cast<size_t>(res * DISK_USAGE_COEFFICIENT_TO_RESERVE);
}
void MergeTreeDataMergerMutator::splitMutationCommands(
MergeTreeData::DataPartPtr part,
const MutationCommands & commands,
MutationCommands & for_interpreter,
MutationCommands & for_file_renames)
{
ColumnsDescription part_columns(part->getColumns());
if (!isWidePart(part))
{
NameSet mutated_columns;
for (const auto & command : commands)
{
if (command.type == MutationCommand::Type::MATERIALIZE_INDEX
|| command.type == MutationCommand::Type::MATERIALIZE_COLUMN
|| command.type == MutationCommand::Type::MATERIALIZE_PROJECTION
|| command.type == MutationCommand::Type::MATERIALIZE_TTL
|| command.type == MutationCommand::Type::DELETE
|| command.type == MutationCommand::Type::UPDATE)
{
for_interpreter.push_back(command);
for (const auto & [column_name, expr] : command.column_to_update_expression)
mutated_columns.emplace(column_name);
if (command.type == MutationCommand::Type::MATERIALIZE_COLUMN)
mutated_columns.emplace(command.column_name);
}
else if (command.type == MutationCommand::Type::DROP_INDEX || command.type == MutationCommand::Type::DROP_PROJECTION)
{
for_file_renames.push_back(command);
}
else if (part_columns.has(command.column_name))
{
if (command.type == MutationCommand::Type::DROP_COLUMN)
{
mutated_columns.emplace(command.column_name);
}
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
{
for_interpreter.push_back(
{
.type = MutationCommand::Type::READ_COLUMN,
.column_name = command.rename_to,
});
mutated_columns.emplace(command.column_name);
part_columns.rename(command.column_name, command.rename_to);
}
}
}
/// If it's compact part, then we don't need to actually remove files
/// from disk we just don't read dropped columns
for (const auto & column : part->getColumns())
{
if (!mutated_columns.contains(column.name))
for_interpreter.emplace_back(
MutationCommand{.type = MutationCommand::Type::READ_COLUMN, .column_name = column.name, .data_type = column.type});
}
}
else
{
for (const auto & command : commands)
{
if (command.type == MutationCommand::Type::MATERIALIZE_INDEX
|| command.type == MutationCommand::Type::MATERIALIZE_COLUMN
|| command.type == MutationCommand::Type::MATERIALIZE_PROJECTION
|| command.type == MutationCommand::Type::MATERIALIZE_TTL
|| command.type == MutationCommand::Type::DELETE
|| command.type == MutationCommand::Type::UPDATE)
{
for_interpreter.push_back(command);
}
else if (command.type == MutationCommand::Type::DROP_INDEX || command.type == MutationCommand::Type::DROP_PROJECTION)
{
for_file_renames.push_back(command);
}
/// If we don't have this column in source part, than we don't need
/// to materialize it
else if (part_columns.has(command.column_name))
{
if (command.type == MutationCommand::Type::READ_COLUMN)
{
for_interpreter.push_back(command);
}
else if (command.type == MutationCommand::Type::RENAME_COLUMN)
{
part_columns.rename(command.column_name, command.rename_to);
for_file_renames.push_back(command);
}
else
{
for_file_renames.push_back(command);
}
}
}
}
}
std::pair<NamesAndTypesList, SerializationInfoByName>
MergeTreeDataMergerMutator::getColumnsForNewDataPart(
MergeTreeData::DataPartPtr source_part,
const Block & updated_header,
NamesAndTypesList storage_columns,
const SerializationInfoByName & serialization_infos,
const MutationCommands & commands_for_removes)
{
NameSet removed_columns;
NameToNameMap renamed_columns_to_from;
NameToNameMap renamed_columns_from_to;
ColumnsDescription part_columns(source_part->getColumns());
/// All commands are validated in AlterCommand so we don't care about order
for (const auto & command : commands_for_removes)
{
/// If we don't have this column in source part, than we don't need to materialize it
if (!part_columns.has(command.column_name))
continue;
if (command.type == MutationCommand::DROP_COLUMN)
removed_columns.insert(command.column_name);
if (command.type == MutationCommand::RENAME_COLUMN)
{
renamed_columns_to_from.emplace(command.rename_to, command.column_name);
renamed_columns_from_to.emplace(command.column_name, command.rename_to);
}
}
SerializationInfoByName new_serialization_infos;
for (const auto & [name, info] : serialization_infos)
{
if (removed_columns.contains(name))
continue;
auto it = renamed_columns_from_to.find(name);
if (it != renamed_columns_from_to.end())
new_serialization_infos.emplace(it->second, info);
else
new_serialization_infos.emplace(name, info);
}
/// In compact parts we read all columns, because they all stored in a
/// single file
if (!isWidePart(source_part))
return {updated_header.getNamesAndTypesList(), new_serialization_infos};
Names source_column_names = source_part->getColumns().getNames();
NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end());
for (auto it = storage_columns.begin(); it != storage_columns.end();)
{
if (updated_header.has(it->name))
{
auto updated_type = updated_header.getByName(it->name).type;
if (updated_type != it->type)
it->type = updated_type;
++it;
}
else
{
if (!source_columns_name_set.contains(it->name))
{
/// Source part doesn't have column but some other column
/// was renamed to it's name.
auto renamed_it = renamed_columns_to_from.find(it->name);
if (renamed_it != renamed_columns_to_from.end()
&& source_columns_name_set.contains(renamed_it->second))
++it;
else
it = storage_columns.erase(it);
}
else
{
/// Check that this column was renamed to some other name
bool was_renamed = renamed_columns_from_to.contains(it->name);
bool was_removed = removed_columns.contains(it->name);
/// If we want to rename this column to some other name, than it
/// should it's previous version should be dropped or removed
if (renamed_columns_to_from.contains(it->name) && !was_renamed && !was_removed)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect mutation commands, trying to rename column {} to {}, but part {} already has column {}", renamed_columns_to_from[it->name], it->name, source_part->name, it->name);
/// Column was renamed and no other column renamed to it's name
/// or column is dropped.
if (!renamed_columns_to_from.contains(it->name) && (was_renamed || was_removed))
it = storage_columns.erase(it);
else
++it;
}
}
}
return {storage_columns, new_serialization_infos};
}
ExecuteTTLType MergeTreeDataMergerMutator::shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies)
{
if (!metadata_snapshot->hasAnyTTL())
return ExecuteTTLType::NONE;
bool has_ttl_expression = false;
for (const auto & dependency : dependencies)
{
if (dependency.kind == ColumnDependency::TTL_EXPRESSION)
has_ttl_expression = true;
if (dependency.kind == ColumnDependency::TTL_TARGET)
return ExecuteTTLType::NORMAL;
}
return has_ttl_expression ? ExecuteTTLType::RECALCULATE : ExecuteTTLType::NONE;
}
}

View File

@ -146,27 +146,6 @@ private:
friend class MutateTask;
friend class MergeTask;
/** Split mutation commands into two parts:
* First part should be executed by mutations interpreter.
* Other is just simple drop/renames, so they can be executed without interpreter.
*/
static void splitMutationCommands(
MergeTreeData::DataPartPtr part,
const MutationCommands & commands,
MutationCommands & for_interpreter,
MutationCommands & for_file_renames);
/// Get the columns list of the resulting part in the same order as storage_columns.
static std::pair<NamesAndTypesList, SerializationInfoByName> getColumnsForNewDataPart(
MergeTreeData::DataPartPtr source_part,
const Block & updated_header,
NamesAndTypesList storage_columns,
const SerializationInfoByName & serialization_infos,
const MutationCommands & commands_for_removes);
static ExecuteTTLType shouldExecuteTTL(
const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies);
public :
/** Is used to cancel all merges and mutations. On cancel() call all currently running actions will throw exception soon.
* All new attempts to start a merge or mutation will throw an exception until all 'LockHolder' objects will be destroyed.

View File

@ -74,7 +74,7 @@ static void splitMutationCommands(
mutated_columns.emplace(column_name);
if (command.type == MutationCommand::Type::MATERIALIZE_COLUMN)
mutated_columns.emplace(command.column_name);
mutated_columns.emplace(command.column_name);
}
else if (command.type == MutationCommand::Type::DROP_INDEX || command.type == MutationCommand::Type::DROP_PROJECTION)
{
@ -146,6 +146,124 @@ static void splitMutationCommands(
}
}
/// Get the columns list of the resulting part in the same order as storage_columns.
static std::pair<NamesAndTypesList, SerializationInfoByName>
getColumnsForNewDataPart(
MergeTreeData::DataPartPtr source_part,
const Block & updated_header,
NamesAndTypesList storage_columns,
const SerializationInfoByName & serialization_infos,
const MutationCommands & commands_for_removes)
{
NameSet removed_columns;
NameToNameMap renamed_columns_to_from;
NameToNameMap renamed_columns_from_to;
ColumnsDescription part_columns(source_part->getColumns());
/// All commands are validated in AlterCommand so we don't care about order
for (const auto & command : commands_for_removes)
{
/// If we don't have this column in the source part, then we don't need to materialize it
if (!part_columns.has(command.column_name))
continue;
if (command.type == MutationCommand::DROP_COLUMN)
removed_columns.insert(command.column_name);
if (command.type == MutationCommand::RENAME_COLUMN)
{
renamed_columns_to_from.emplace(command.rename_to, command.column_name);
renamed_columns_from_to.emplace(command.column_name, command.rename_to);
}
}
SerializationInfoByName new_serialization_infos;
for (const auto & [name, info] : serialization_infos)
{
if (removed_columns.contains(name))
continue;
auto it = renamed_columns_from_to.find(name);
if (it != renamed_columns_from_to.end())
new_serialization_infos.emplace(it->second, info);
else
new_serialization_infos.emplace(name, info);
}
/// In compact parts we read all columns, because they are all stored in a
/// single file
if (!isWidePart(source_part))
return {updated_header.getNamesAndTypesList(), new_serialization_infos};
Names source_column_names = source_part->getColumns().getNames();
NameSet source_columns_name_set(source_column_names.begin(), source_column_names.end());
for (auto it = storage_columns.begin(); it != storage_columns.end();)
{
if (updated_header.has(it->name))
{
auto updated_type = updated_header.getByName(it->name).type;
if (updated_type != it->type)
it->type = updated_type;
++it;
}
else
{
if (!source_columns_name_set.contains(it->name))
{
/// Source part doesn't have this column, but some other column
/// was renamed to its name.
auto renamed_it = renamed_columns_to_from.find(it->name);
if (renamed_it != renamed_columns_to_from.end()
&& source_columns_name_set.contains(renamed_it->second))
++it;
else
it = storage_columns.erase(it);
}
else
{
/// Check that this column was renamed to some other name
bool was_renamed = renamed_columns_from_to.contains(it->name);
bool was_removed = removed_columns.contains(it->name);
/// If we want to rename this column to some other name, then its
/// previous version should be dropped or removed
if (renamed_columns_to_from.contains(it->name) && !was_renamed && !was_removed)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect mutation commands, trying to rename column {} to {}, but part {} already has column {}", renamed_columns_to_from[it->name], it->name, source_part->name, it->name);
/// Column was renamed and no other column was renamed to its name
/// or column is dropped.
if (!renamed_columns_to_from.contains(it->name) && (was_renamed || was_removed))
it = storage_columns.erase(it);
else
++it;
}
}
}
return {storage_columns, new_serialization_infos};
}
static ExecuteTTLType shouldExecuteTTL(const StorageMetadataPtr & metadata_snapshot, const ColumnDependencies & dependencies)
{
if (!metadata_snapshot->hasAnyTTL())
return ExecuteTTLType::NONE;
bool has_ttl_expression = false;
for (const auto & dependency : dependencies)
{
if (dependency.kind == ColumnDependency::TTL_EXPRESSION)
has_ttl_expression = true;
if (dependency.kind == ColumnDependency::TTL_TARGET)
return ExecuteTTLType::NORMAL;
}
return has_ttl_expression ? ExecuteTTLType::RECALCULATE : ExecuteTTLType::NONE;
}
/// Get skip indices, that should exists in the resulting data part.
static MergeTreeIndices getIndicesForNewDataPart(
@ -1337,7 +1455,7 @@ bool MutateTask::prepare()
/// It shouldn't be changed by mutation.
ctx->new_data_part->index_granularity_info = ctx->source_part->index_granularity_info;
auto [new_columns, new_infos] = MergeTreeDataMergerMutator::getColumnsForNewDataPart(
auto [new_columns, new_infos] = MutationHelpers::getColumnsForNewDataPart(
ctx->source_part, ctx->updated_header, ctx->storage_columns,
ctx->source_part->getSerializationInfos(), ctx->commands_for_part);
@ -1357,7 +1475,7 @@ bool MutateTask::prepare()
ctx->execute_ttl_type = ExecuteTTLType::NONE;
if (ctx->mutating_pipeline.initialized())
ctx->execute_ttl_type = MergeTreeDataMergerMutator::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies());
ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies());
/// All columns from part are changed and may be some more that were missing before in part
/// TODO We can materialize compact part without copying data

View File

@ -611,9 +611,7 @@ int32_t ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::ZooKeeperPtr zookeeper
/// Multiple log entries that must be copied to the queue.
log_entries.erase(
std::remove_if(log_entries.begin(), log_entries.end(), [&min_log_entry](const String & entry) { return entry < min_log_entry; }),
log_entries.end());
std::erase_if(log_entries, [&min_log_entry](const String & entry) { return entry < min_log_entry; });
if (!log_entries.empty())
{

View File

@ -279,10 +279,10 @@ void StorageSystemZooKeeper::fillData(MutableColumns & res_columns, ContextPtr c
if (!prefix.empty())
{
// Remove nodes that do not match specified prefix
nodes.erase(std::remove_if(nodes.begin(), nodes.end(), [&prefix, &path_part] (const String & node)
std::erase_if(nodes, [&prefix, &path_part] (const String & node)
{
return (path_part + '/' + node).substr(0, prefix.size()) != prefix;
}), nodes.end());
});
}
std::vector<std::future<Coordination::GetResponse>> futures;

View File

@ -108,7 +108,8 @@ def _exec_get_with_retry(url):
WorkflowDescription = namedtuple(
"WorkflowDescription", ["run_id", "status", "rerun_url", "cancel_url", "conclusion"]
"WorkflowDescription",
["run_id", "head_sha", "status", "rerun_url", "cancel_url", "conclusion"],
)
@ -160,6 +161,7 @@ def get_workflows_description_for_pull_request(
workflow_descriptions.append(
WorkflowDescription(
run_id=workflow["id"],
head_sha=workflow["head_sha"],
status=workflow["status"],
rerun_url=workflow["rerun_url"],
cancel_url=workflow["cancel_url"],
@ -170,11 +172,9 @@ def get_workflows_description_for_pull_request(
return workflow_descriptions
def get_workflow_description_fallback(event_data) -> List[WorkflowDescription]:
pull_request_event = event_data["pull_request"]
def get_workflow_description_fallback(pull_request_event) -> List[WorkflowDescription]:
head_repo = pull_request_event["head"]["repo"]["full_name"]
head_branch = pull_request_event["head"]["ref"]
head_sha = pull_request_event["head"]["sha"]
print("Get last 500 workflows from API to search related there")
# Fallback for a case of an already deleted branch and no workflows received
request_url = f"{API_URL}/actions/runs?per_page=100"
@ -213,16 +213,11 @@ def get_workflow_description_fallback(event_data) -> List[WorkflowDescription]:
}
for wf in workflows_data
]
if event_data["action"] == "synchronize":
print(f"Leave only workflows with SHA but {head_sha} for updated PR")
# Cancel all events with SHA different than current
workflows_data = list(
filter(lambda x: x["head_sha"] != head_sha, workflows_data)
)
workflow_descriptions = [
WorkflowDescription(
run_id=wf["id"],
head_sha=wf["head_sha"],
status=wf["status"],
rerun_url=wf["rerun_url"],
cancel_url=wf["cancel_url"],
@ -238,6 +233,7 @@ def get_workflow_description(workflow_id) -> WorkflowDescription:
workflow = _exec_get_with_retry(API_URL + f"/actions/runs/{workflow_id}")
return WorkflowDescription(
run_id=workflow["id"],
head_sha=workflow["head_sha"],
status=workflow["status"],
rerun_url=workflow["rerun_url"],
cancel_url=workflow["cancel_url"],
@ -281,7 +277,7 @@ def main(event):
print("PR merged/closed or manually labeled 'do not test' will kill workflows")
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
workflow_descriptions = (
workflow_descriptions or get_workflow_description_fallback(event_data)
workflow_descriptions or get_workflow_description_fallback(pull_request)
)
urls_to_cancel = []
for workflow_description in workflow_descriptions:
@ -296,13 +292,14 @@ def main(event):
print("PR is synchronized, going to stop old actions")
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
workflow_descriptions = (
workflow_descriptions or get_workflow_description_fallback(event_data)
workflow_descriptions or get_workflow_description_fallback(pull_request)
)
urls_to_cancel = []
for workflow_description in workflow_descriptions:
if (
workflow_description.status != "completed"
and workflow_description.conclusion != "cancelled"
and workflow_description.head_sha != pull_request["head"]["sha"]
):
urls_to_cancel.append(workflow_description.cancel_url)
print(f"Found {len(urls_to_cancel)} workflows to cancel")
@ -311,7 +308,7 @@ def main(event):
print("PR marked with can be tested label, rerun workflow")
workflow_descriptions = get_workflows_description_for_pull_request(pull_request)
workflow_descriptions = (
workflow_descriptions or get_workflow_description_fallback(event_data)
workflow_descriptions or get_workflow_description_fallback(pull_request)
)
if not workflow_descriptions:
print("Not found any workflows")

View File

@ -10,7 +10,18 @@
<cache_enabled>0</cache_enabled>
<data_cache_max_size>22548578304</data_cache_max_size>
<cache_on_write_operations>1</cache_on_write_operations>
<data_cache_path>./s3_cache/</data_cache_path>
</s3_cache>
<s3_cache_2>
<type>s3</type>
<endpoint>http://localhost:11111/test/00170_test/</endpoint>
<access_key_id>clickhouse</access_key_id>
<secret_access_key>clickhouse</secret_access_key>
<data_cache_enabled>1</data_cache_enabled>
<cache_enabled>0</cache_enabled>
<data_cache_max_size>22548578304</data_cache_max_size>
<cache_on_write_operations>0</cache_on_write_operations>
</s3_cache_2>
</disks>
<policies>
<s3_cache>
@ -20,6 +31,13 @@
</main>
</volumes>
</s3_cache>
<s3_cache_2>
<volumes>
<main>
<disk>s3_cache_2</disk>
</main>
</volumes>
</s3_cache_2>
</policies>
</storage_configuration>
</clickhouse>

View File

@ -1311,7 +1311,7 @@
"toSecond"
"toStartOfDay"
"toStartOfFifteenMinutes"
"toStartOfFiveMinute"
"toStartOfFiveMinutes"
"toStartOfHour"
"toStartOfInterval"
"toStartOfISOYear"

View File

@ -507,7 +507,7 @@
"toStartOfFifteenMinutes"
"emptyArrayUInt8"
"dictGetUInt8"
"toStartOfFiveMinute"
"toStartOfFiveMinutes"
"cbrt"
"toStartOfMinute"
"dictGet"

View File

@ -16,7 +16,7 @@
<value>toISOYear</value>
<value>toStartOfMinute</value>
<value>toStartOfFiveMinute</value>
<value>toStartOfFiveMinutes</value>
<value>toStartOfFifteenMinutes</value>
<value>toStartOfHour</value>
<value>toStartOfDay</value>

View File

@ -107,7 +107,7 @@ toStartOfMinute
2019-02-06 19:57:00
2019-02-07 04:57:00
2019-02-06 11:57:00
toStartOfFiveMinute
toStartOfFiveMinutes
2019-02-06 22:55:00
2019-02-06 20:55:00
2019-02-06 19:55:00

View File

@ -162,14 +162,14 @@ SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Europe/London'), 'Europ
SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo');
SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn');
/* toStartOfFiveMinute */
/* toStartOfFiveMinutes */
SELECT 'toStartOfFiveMinute';
SELECT toString(toStartOfFiveMinute(toDateTime(1549483055), 'Europe/Moscow'), 'Europe/Moscow');
SELECT toString(toStartOfFiveMinute(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris');
SELECT toString(toStartOfFiveMinute(toDateTime(1549483055), 'Europe/London'), 'Europe/London');
SELECT toString(toStartOfFiveMinute(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo');
SELECT toString(toStartOfFiveMinute(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn');
SELECT 'toStartOfFiveMinutes';
SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Europe/Moscow'), 'Europe/Moscow');
SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris');
SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Europe/London'), 'Europe/London');
SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo');
SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn');
/* toStartOfTenMinutes */

View File

@ -199,7 +199,7 @@ SELECT toMinute(NULL);
SELECT toStartOfDay(NULL);
SELECT toMonday(NULL);
SELECT toUInt16OrZero(NULL);
SELECT toStartOfFiveMinute(NULL);
SELECT toStartOfFiveMinutes(NULL);
SELECT halfMD5(NULL);
SELECT toStartOfHour(NULL);
SELECT toRelativeYearNum(NULL);

View File

@ -28,7 +28,7 @@ toStartOfWeek(N, 'Europe/Moscow')
toStartOfDay(N, 'Europe/Moscow')
toStartOfHour(N, 'Europe/Moscow')
toStartOfMinute(N, 'Europe/Moscow')
toStartOfFiveMinute(N, 'Europe/Moscow')
toStartOfFiveMinutes(N, 'Europe/Moscow')
toStartOfTenMinutes(N, 'Europe/Moscow')
toStartOfFifteenMinutes(N, 'Europe/Moscow')
toStartOfInterval(N, INTERVAL 1 year, 'Europe/Moscow')

View File

@ -98,7 +98,7 @@ Code: 43
"DateTime('Europe/Moscow')","2019-09-16 19:20:00"
"DateTime('Europe/Moscow')","2019-09-16 19:20:00"
------------------------------------------
SELECT toStartOfFiveMinute(N, \'Europe/Moscow\')
SELECT toStartOfFiveMinutes(N, \'Europe/Moscow\')
Code: 43
"DateTime('Europe/Moscow')","2019-09-16 19:20:00"
"DateTime('Europe/Moscow')","2019-09-16 19:20:00"

View File

@ -1,3 +1,5 @@
-- Tags: no-parallel
DROP DICTIONARY IF EXISTS system.dict1;
CREATE DICTIONARY IF NOT EXISTS system.dict1

View File

@ -1,6 +1,8 @@
#!/usr/bin/env bash
# Tags: no-parallel
# The server may ignore some exceptions, but it still prints them to the logs and (at least in CI) sends Error and Warning log messages to the client,
# making the test fail because of non-empty stderr. Ignore such log messages.
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh

View File

@ -4,7 +4,7 @@ drop table if exists mt2;
create table mt1 (n Int64) engine=MergeTree order by n;
create table mt2 (n Int64) engine=MergeTree order by n;
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- no transaction
rollback; -- { serverError INVALID_TRANSACTION }
begin transaction;
@ -31,7 +31,7 @@ select 'on exception before start', arraySort(groupArray(n)) from (select n from
-- rollback on exception before start
select functionThatDoesNotExist(); -- { serverError 46 }
-- cannot commit after exception
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- after 46
begin transaction; -- { serverError INVALID_TRANSACTION }
rollback;
@ -42,7 +42,7 @@ select 'on exception while processing', arraySort(groupArray(n)) from (select n
-- rollback on exception while processing
select throwIf(100 < number) from numbers(1000); -- { serverError 395 }
-- cannot commit after exception
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- after 395
insert into mt1 values (5); -- { serverError INVALID_TRANSACTION }
insert into mt2 values (50); -- { serverError INVALID_TRANSACTION }
select 1; -- { serverError INVALID_TRANSACTION }
@ -52,10 +52,9 @@ begin transaction;
insert into mt1 values (6);
insert into mt2 values (60);
select 'on session close', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2);
-- trigger reconnection by error on client, check rollback on session close
insert into mt1 values ([1]); -- { clientError 43 }
commit; -- { serverError INVALID_TRANSACTION }
rollback; -- { serverError INVALID_TRANSACTION }
-- INSERT failures do not trigger a client reconnect anymore, so rollback can be performed
rollback;
begin transaction;
insert into mt1 values (7);
@ -82,19 +81,19 @@ rollback;
begin transaction;
create table m (n int) engine=Memory; -- { serverError 48 }
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- after 48
rollback;
create table m (n int) engine=Memory;
begin transaction;
insert into m values (1); -- { serverError 48 }
select * from m; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- after 48
rollback;
begin transaction;
select * from m; -- { serverError 48 }
commit; -- { serverError INVALID_TRANSACTION }
commit; -- { serverError INVALID_TRANSACTION } -- after 48
rollback;
drop table m;

View File

@ -1,2 +1,2 @@
1
Test OK
OSReadChars
OSCPUVirtualTimeMicroseconds

View File

@ -8,46 +8,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
function read_numbers_func()
{
$CLICKHOUSE_CLIENT -q "
SELECT * FROM numbers(600000000) FORMAT Null SETTINGS max_threads = 1
";
}
# NOTE: the netlink taskstats interface rounds to 1KB [1], so we cannot use ${BASH_SOURCE[0]}
#
# [1]: https://elixir.bootlin.com/linux/v5.18-rc4/source/kernel/tsacct.c#L101
tmp_path=$(mktemp "$CURDIR/01268_procfs_metrics.XXXXXX")
trap 'rm -f $tmp_path' EXIT
truncate -s1025 "$tmp_path"
function show_processes_func()
{
while true; do
sleep 0.1;
# These two system metrics are guaranteed to be nonzero for the number-generating query above when ProcFS is mounted at /proc
$CLICKHOUSE_CLIENT -q "
SELECT count() > 0 FROM system.processes\
WHERE ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0 AND ProfileEvents['OSReadChars'] > 0 \
SETTINGS max_threads = 1
" | grep '1' && break;
done
}
export -f read_numbers_func;
export -f show_processes_func;
TIMEOUT=3
timeout $TIMEOUT bash -c read_numbers_func &
timeout $TIMEOUT bash -c show_processes_func &
wait
# otherwise it can be alive after test
query_alive=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.processes WHERE query ILIKE 'SELECT * FROM numbers(600000000)%'")
while [[ $query_alive != 0 ]]
do
$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query ilike '%SELECT * FROM numbers(600000000)%'" 2> /dev/null 1> /dev/null
sleep 0.5
query_alive=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.processes WHERE query ILIKE 'SELECT * FROM numbers(600000000)%'")
done
echo "Test OK"
$CLICKHOUSE_LOCAL --profile-events-delay-ms=-1 --print-profile-events -q "SELECT * FROM file('$tmp_path', 'LineAsString') FORMAT Null" |& grep -m1 -F -o -e OSReadChars
# NOTE: OSCPUVirtualTimeMicroseconds is measured in microseconds, so 1e6 rows is not enough.
$CLICKHOUSE_LOCAL --profile-events-delay-ms=-1 --print-profile-events -q "SELECT * FROM numbers(10e6) FORMAT Null" |& grep -m1 -F -o -e OSCPUVirtualTimeMicroseconds
exit 0

View File

@ -10,46 +10,40 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
function thread_create {
while true; do
$CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
done
function thread_create()
{
$CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS $1 (x UInt64, s Array(Nullable(String))) ENGINE = $2" 2>&1 | grep -v -F 'Received exception from server' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
}
function thread_drop {
while true; do
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
done
function thread_drop()
{
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS $1" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
}
function thread_rename {
while true; do
$CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
done
function thread_rename()
{
$CLICKHOUSE_CLIENT --query "RENAME TABLE $1 TO $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|57)'
sleep 0.0$RANDOM
}
function thread_select {
while true; do
$CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
done
function thread_select()
{
$CLICKHOUSE_CLIENT --query "SELECT * FROM $1 FORMAT Null" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
}
function thread_insert {
while true; do
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
done
function thread_insert()
{
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT rand64(1), [toString(rand64(2))] FROM numbers($2)" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
}
function thread_insert_select {
while true; do
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
done
function thread_insert_select()
{
$CLICKHOUSE_CLIENT --query "INSERT INTO $1 SELECT * FROM $2" 2>&1 | grep -v -e 'Received exception from server' -e '^(query: ' | grep -v -P 'Code: (60|218)'
sleep 0.0$RANDOM
}
export -f thread_create
@ -65,18 +59,18 @@ export -f thread_insert_select
function test_with_engine {
echo "Testing $1"
timeout 10 bash -c "thread_create t1 $1" &
timeout 10 bash -c "thread_create t2 $1" &
timeout 10 bash -c 'thread_drop t1' &
timeout 10 bash -c 'thread_drop t2' &
timeout 10 bash -c 'thread_rename t1 t2' &
timeout 10 bash -c 'thread_rename t2 t1' &
timeout 10 bash -c 'thread_select t1' &
timeout 10 bash -c 'thread_select t2' &
timeout 10 bash -c 'thread_insert t1 5' &
timeout 10 bash -c 'thread_insert t2 10' &
timeout 10 bash -c 'thread_insert_select t1 t2' &
timeout 10 bash -c 'thread_insert_select t2 t1' &
clickhouse_client_loop_timeout 10 thread_create t1 $1 &
clickhouse_client_loop_timeout 10 thread_create t2 $1 &
clickhouse_client_loop_timeout 10 thread_drop t1 &
clickhouse_client_loop_timeout 10 thread_drop t2 &
clickhouse_client_loop_timeout 10 thread_rename t1 t2 &
clickhouse_client_loop_timeout 10 thread_rename t2 t1 &
clickhouse_client_loop_timeout 10 thread_select t1 &
clickhouse_client_loop_timeout 10 thread_select t2 &
clickhouse_client_loop_timeout 10 thread_insert t1 5 &
clickhouse_client_loop_timeout 10 thread_insert t2 10 &
clickhouse_client_loop_timeout 10 thread_insert_select t1 t2 &
clickhouse_client_loop_timeout 10 thread_insert_select t2 t1 &
wait
echo "Done $1"

View File

@ -0,0 +1,3 @@
{"data":{"k1":0,"k2":2}}
{"data":{"k1":1,"k2":0}}
Tuple(k1 Int8, k2 Int8)

View File

@ -0,0 +1,25 @@
-- Tags: no-fasttest
SET allow_experimental_object_type = 1;
SET output_format_json_named_tuples_as_objects = 1;
DROP TABLE IF EXISTS t_json_wide_parts;
CREATE TABLE t_json_wide_parts (data JSON)
ENGINE MergeTree ORDER BY tuple()
SETTINGS min_bytes_for_wide_part = 0;
SYSTEM STOP MERGES t_json_wide_parts;
INSERT INTO t_json_wide_parts VALUES ('{"k1": 1}');
INSERT INTO t_json_wide_parts VALUES ('{"k2": 2}');
SYSTEM START MERGES t_json_wide_parts;
OPTIMIZE TABLE t_json_wide_parts FINAL;
SELECT data FROM t_json_wide_parts ORDER BY data.k1 FORMAT JSONEachRow;
SELECT type FROM system.parts_columns
WHERE table = 't_json_wide_parts' AND database = currentDatabase() AND active;
DROP TABLE t_json_wide_parts;
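The reference output earlier in this commit shows what this test asserts: after the merge, the two single-key inserts are unified into one Tuple(k1 Int8, k2 Int8) column, with the key missing from each original row defaulted to 0. A small follow-up query reading the subcolumns directly; this is a sketch rather than part of the test, and assumes it runs before the final DROP TABLE:

-- Missing keys read back as default values (0 for Int8).
SELECT data.k1, data.k2 FROM t_json_wide_parts ORDER BY data.k1;
-- 0  2
-- 1  0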

View File

@ -104,7 +104,7 @@
2021-01-01
-------toStartOfSecond---------
-------toStartOfMinute---------
-------toStartOfFiveMinute---------
-------toStartOfFiveMinutes---------
-------toStartOfTenMinutes---------
-------toStartOfFifteenMinutes---------
-------toStartOfHour---------

View File

@ -46,8 +46,8 @@ select '-------toStartOfSecond---------';
select toStartOfSecond(x1) from t1; -- { serverError 43 }
select '-------toStartOfMinute---------';
select toStartOfMinute(x1) from t1; -- { serverError 43 }
select '-------toStartOfFiveMinute---------';
select toStartOfFiveMinute(x1) from t1; -- { serverError 43 }
select '-------toStartOfFiveMinutes---------';
select toStartOfFiveMinutes(x1) from t1; -- { serverError 43 }
select '-------toStartOfTenMinutes---------';
select toStartOfTenMinutes(x1) from t1; -- { serverError 43 }
select '-------toStartOfFifteenMinutes---------';

View File

@ -5,7 +5,7 @@ timeZoneOffset(t): -2670
formatDateTime(t, '%F %T', 'Africa/Monrovia'): 1970-06-17 07:39:21
toString(t, 'Africa/Monrovia'): 1970-06-17 07:39:21
toStartOfMinute(t): 1970-06-17 07:39:00
toStartOfFiveMinute(t): 1970-06-17 07:35:00
toStartOfFiveMinutes(t): 1970-06-17 07:35:00
toStartOfFifteenMinutes(t): 1970-06-17 07:30:00
toStartOfTenMinutes(t): 1970-06-17 07:30:00
toStartOfHour(t): 1970-06-17 07:00:00

View File

@ -5,7 +5,7 @@ SELECT toUnixTimestamp(t),
formatDateTime(t, '%F %T', 'Africa/Monrovia'),
toString(t, 'Africa/Monrovia'),
toStartOfMinute(t),
toStartOfFiveMinute(t),
toStartOfFiveMinutes(t),
toStartOfFifteenMinutes(t),
toStartOfTenMinutes(t),
toStartOfHour(t),

View File

@ -12,7 +12,7 @@ select toStartOfYear(toDate(0));
select toStartOfYear(toDateTime(0, 'Europe/Moscow'));
select toTime(toDateTime(0, 'Europe/Moscow'));
select toStartOfMinute(toDateTime(0, 'Europe/Moscow'));
select toStartOfFiveMinute(toDateTime(0, 'Europe/Moscow'));
select toStartOfFiveMinutes(toDateTime(0, 'Europe/Moscow'));
select toStartOfTenMinutes(toDateTime(0, 'Europe/Moscow'));
select toStartOfFifteenMinutes(toDateTime(0, 'Europe/Moscow'));
select toStartOfHour(toDateTime(0, 'Europe/Moscow'));
@ -25,7 +25,7 @@ select toStartOfQuarter(toDateTime(0, 'America/Los_Angeles'));
select toStartOfYear(toDateTime(0, 'America/Los_Angeles'));
select toTime(toDateTime(0, 'America/Los_Angeles'), 'America/Los_Angeles');
select toStartOfMinute(toDateTime(0, 'America/Los_Angeles'));
select toStartOfFiveMinute(toDateTime(0, 'America/Los_Angeles'));
select toStartOfFiveMinutes(toDateTime(0, 'America/Los_Angeles'));
select toStartOfTenMinutes(toDateTime(0, 'America/Los_Angeles'));
select toStartOfFifteenMinutes(toDateTime(0, 'America/Los_Angeles'));
select toStartOfHour(toDateTime(0, 'America/Los_Angeles'));

View File

@ -1,3 +1,5 @@
-- Tags: no-random-settings
create table if not exists t_group_by_lowcardinality(p_date Date, val LowCardinality(Nullable(String)))
engine=MergeTree() partition by p_date order by tuple();

View File

@ -0,0 +1,40 @@
-- Tags: replica, long
-- Regression test for a possible CHECKSUM_DOESNT_MATCH caused by a per-column TTL bug,
-- which was fixed in https://github.com/ClickHouse/ClickHouse/pull/35820
drop table if exists ttl_02265;
drop table if exists ttl_02265_r2;
-- The bug appears only for Wide parts.
create table ttl_02265 (date Date, key Int, value String TTL date + interval 1 month) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/ttl_02265', 'r1') order by key partition by date settings min_bytes_for_wide_part=0;
create table ttl_02265_r2 (date Date, key Int, value String TTL date + interval 1 month) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/ttl_02265', 'r2') order by key partition by date settings min_bytes_for_wide_part=0;
-- after, 20100101_0_0_0 will have ttl.txt and value.bin
insert into ttl_02265 values ('2010-01-01', 2010, 'foo');
-- after, 20100101_0_0_1 will have neither ttl.txt nor value.bin
optimize table ttl_02265 final;
-- after, 20100101_0_0_2 will not have ttl.txt, but will have value.bin
optimize table ttl_02265 final;
system sync replica ttl_02265;
-- after detach/attach it will not have TTL in-memory, and will not have ttl.txt
detach table ttl_02265;
attach table ttl_02265;
-- So now the state for 20100101_0_0_2 is as follows:
--
-- table | in_memory_ttl | ttl.txt | value.bin/mrk2
-- ttl_02265 | N | N | N
-- ttl_02265_r2 | Y | N | N
--
-- Hence, on the replica that does not have the TTL in memory (this replica),
-- it will try to apply the TTL and the column will be dropped,
-- but on the other replica the column won't be dropped, since it has the in-memory TTL and will not apply it,
-- and eventually this will lead to the following error:
--
-- MergeFromLogEntryTask: Code: 40. DB::Exception: Part 20100101_0_0_3 from r2 has different columns hash. (CHECKSUM_DOESNT_MATCH) (version 22.4.1.1). Data after merge is not byte-identical to data on another replicas. There could be several reasons: 1. Using newer version of compression library after server update. 2. Using another compression method. 3. Non-deterministic compression algorithm (highly unlikely). 4. Non-deterministic merge algorithm due to logical error in code. 5. Data corruption in memory due to bug in code. 6. Data corruption in memory due to hardware issue. 7. Manual modification of source data after server startup. 8. Manual modification of checksums stored in ZooKeeper. 9. Part format related settings like 'enable_mixed_granularity_parts' are different on different replicas. We will download merged part from replica to force byte-identical result.
--
optimize table ttl_02265 final;
system flush logs;
select * from system.part_log where database = currentDatabase() and table like 'ttl_02265%' and error != 0;
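To observe the divergence the comments above describe, one could compare which columns each replica still stores for its active parts. A sketch using system.parts_columns (the same system table queried by the JSON test earlier in this commit), assuming the test's database and both ttl_02265 tables exist:

-- Per-part column lists; the replica that applied the TTL would be missing the value column.
SELECT table, name AS part_name, groupArray(column) AS columns
FROM system.parts_columns
WHERE database = currentDatabase() AND table LIKE 'ttl_02265%' AND active
GROUP BY table, name
ORDER BY table, part_name;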

View File

@ -0,0 +1,6 @@
select toStartOfInterval(toDateTime64('\0930-12-12 12:12:12.1234567', 3), toIntervalNanosecond(1024)); -- {serverError 407}
SELECT
toDateTime64(-9223372036854775808, 1048575, toIntervalNanosecond(9223372036854775806), NULL),
toStartOfInterval(toDateTime64(toIntervalNanosecond(toIntervalNanosecond(257), toDateTime64(toStartOfInterval(toDateTime64(NULL)))), '', 100), toIntervalNanosecond(toStartOfInterval(toDateTime64(toIntervalNanosecond(NULL), NULL)), -1)),
toStartOfInterval(toDateTime64('\0930-12-12 12:12:12.1234567', 3), toIntervalNanosecond(1024)); -- {serverError 407}

View File

@ -0,0 +1,30 @@
-- { echo }
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
SYSTEM DROP FILESYSTEM CACHE;
SELECT count() FROM system.filesystem_cache;
0
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT count() FROM system.filesystem_cache;
2
SYSTEM DROP FILESYSTEM CACHE FORCE;
SELECT count() FROM system.filesystem_cache;
0
SELECT * FROM test FORMAT Null;
SELECT count() FROM system.filesystem_cache;
1
SYSTEM DROP FILESYSTEM CACHE './data'; -- { serverError 36 }
SELECT count() FROM system.filesystem_cache;
1
DROP TABLE IF EXISTS test2;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_2', min_bytes_for_wide_part = 10485760;
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test2 FORMAT Null;
SELECT count() FROM system.filesystem_cache;
3
SYSTEM DROP FILESYSTEM CACHE './s3_cache/';
SELECT count() FROM system.filesystem_cache;
2

View File

@ -0,0 +1,34 @@
-- Tags: no-parallel, no-fasttest, no-s3-storage
-- { echo }
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache', min_bytes_for_wide_part = 10485760;
SYSTEM DROP FILESYSTEM CACHE;
SELECT count() FROM system.filesystem_cache;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT count() FROM system.filesystem_cache;
SYSTEM DROP FILESYSTEM CACHE FORCE;
SELECT count() FROM system.filesystem_cache;
SELECT * FROM test FORMAT Null;
SELECT count() FROM system.filesystem_cache;
SYSTEM DROP FILESYSTEM CACHE './data'; -- { serverError 36 }
SELECT count() FROM system.filesystem_cache;
DROP TABLE IF EXISTS test2;
CREATE TABLE test2 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_2', min_bytes_for_wide_part = 10485760;
INSERT INTO test2 SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test2 FORMAT Null;
SELECT count() FROM system.filesystem_cache;
SYSTEM DROP FILESYSTEM CACHE './s3_cache/';
SELECT count() FROM system.filesystem_cache;
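The reference file above documents the intent of the two forms used here: SYSTEM DROP FILESYSTEM CACHE with a path only evicts entries belonging to that particular cache (the segment count drops from 3 to 2), while the bare statement clears every configured cache. Both statements as a standalone sketch:

-- Clear all filesystem caches.
SYSTEM DROP FILESYSTEM CACHE;
-- Clear only the cache whose base path is ./s3_cache/ (the path must match a configured cache, otherwise error 36).
SYSTEM DROP FILESYSTEM CACHE './s3_cache/';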

View File

@ -54,7 +54,7 @@
* 4.2.0.4.19 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofhour)
* 4.2.0.4.20 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofminute)
* 4.2.0.4.21 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofsecond)
* 4.2.0.4.22 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffiveminute)
* 4.2.0.4.22 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffiveminutes)
* 4.2.0.4.23 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoftenminutes)
* 4.2.0.4.24 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffifteenminutes)
* 4.2.0.4.25 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofinterval)
@ -417,10 +417,10 @@ version: 1.0
[ClickHouse] SHALL support correct operation of the [toStartOfSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofsecond)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes
version: 1.0
[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)
[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminutes)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes

View File

@ -714,15 +714,15 @@ RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfSecond = Req
num="4.2.0.4.21",
)
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute = Requirement(
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute",
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinutes = Requirement(
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes",
version="1.0",
priority=None,
group=None,
type=None,
uid=None,
description=(
"[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)\n"
"[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminutes)\n"
"function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].\n"
"\n"
),
@ -1944,7 +1944,7 @@ SRS_010_ClickHouse_DateTime64_Extended_Range = Specification(
num="4.2.0.4.21",
),
Heading(
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute",
name="RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes",
level=5,
num="4.2.0.4.22",
),
@ -2282,7 +2282,7 @@ SRS_010_ClickHouse_DateTime64_Extended_Range = Specification(
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfHour,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfMinute,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfSecond,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinutes,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfTenMinutes,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFifteenMinutes,
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfInterval,
@ -2399,7 +2399,7 @@ SRS_010_ClickHouse_DateTime64_Extended_Range = Specification(
* 4.2.0.4.19 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfHour](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofhour)
* 4.2.0.4.20 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofminute)
* 4.2.0.4.21 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfSecond](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofsecond)
* 4.2.0.4.22 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffiveminute)
* 4.2.0.4.22 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffiveminutes)
* 4.2.0.4.23 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoftenminutes)
* 4.2.0.4.24 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFifteenMinutes](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartoffifteenminutes)
* 4.2.0.4.25 [RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfInterval](#rqsrs-010datetime64extendedrangedatesandtimesfunctionstostartofinterval)
@ -2762,10 +2762,10 @@ version: 1.0
[ClickHouse] SHALL support correct operation of the [toStartOfSecond](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartofsecond)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinute
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfFiveMinutes
version: 1.0
[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinute](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminute)
[ClickHouse] SHALL support correct operation of the [toStartOfFiveMinutes](https://clickhouse.com/docs/en/sql-reference/functions/date-time-functions/#tostartoffiveminutes)
function used with the [DateTime64] data type when it stores dates within the [normal date range] and the [extended date range].
###### RQ.SRS-010.DateTime64.ExtendedRange.DatesAndTimesFunctions.toStartOfTenMinutes

View File

@ -576,13 +576,13 @@ def to_start_of_minutes_interval(self, interval, func):
@TestScenario
@Requirements(
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinute(
RQ_SRS_010_DateTime64_ExtendedRange_DatesAndTimesFunctions_toStartOfFiveMinutes(
"1.0"
)
)
def to_start_of_five_minute(self):
"""Check the toStartOfFiveMinute with DateTime64 extended range."""
to_start_of_minutes_interval(interval=5, func="toStartOfFiveMinute")
def to_start_of_five_minutes(self):
"""Check the toStartOfFiveMinutes with DateTime64 extended range."""
to_start_of_minutes_interval(interval=5, func="toStartOfFiveMinutes")
@TestScenario

View File

@ -249,7 +249,7 @@ std::map<std::string, ColumnType> func_to_param_type = {
{"alphatokens", Type::s}, {"toyear", Type::d | Type::dt}, {"tomonth", Type::d | Type::dt}, {"todayofmonth", Type::d | Type::dt}, {"tohour", Type::dt},
{"tominute", Type::dt}, {"tosecond", Type::dt}, {"touixtimestamp", Type::dt}, {"tostartofyear", Type::d | Type::dt},
{"tostartofquarter", Type::d | Type::dt}, {"tostartofmonth", Type::d | Type::dt}, {"tomonday", Type::d | Type::dt},
{"tostartoffiveminute", Type::dt}, {"tostartoftenminutes", Type::dt}, {"tostartoffifteenminutes", Type::d | Type::dt},
{"tostartoffiveminutes", Type::dt}, {"tostartoftenminutes", Type::dt}, {"tostartoffifteenminutes", Type::d | Type::dt},
{"tostartofinterval", Type::d | Type::dt}, {"totime", Type::d | Type::dt}, {"torelativehonthnum", Type::d | Type::dt},
{"torelativeweeknum", Type::d | Type::dt}, {"torelativedaynum", Type::d | Type::dt}, {"torelativehournum", Type::d | Type::dt},
{"torelativeminutenum", Type::d | Type::dt}, {"torelativesecondnum", Type::d | Type::dt}, {"datediff", Type::d | Type::dt},

View File

@ -1,3 +1,4 @@
v22.4.3.3-stable 2022-04-26
v22.4.2.1-stable 2022-04-22
v22.3.3.44-lts 2022-04-06
v22.3.2.2-lts 2022-03-17


View File

@ -0,0 +1,52 @@
{% extends 'templates/base.html' %}
{% set title = 'Performance comparison of different ClickHouse versions' %}
{% set extra_js = [
'queries.js?' + rev_short,
'results.js?' + rev_short,
'../benchmark.js?' + rev_short]
%}
{% set url = 'https://clickhouse.com/benchmark/versions/' %}
{% set no_footer = True %}
{% block content %}
<div class="container-fluid py-3">
<div class="row mb-3">
<div class="col d-flex align-items-center">
<a href="/" title="Main page" class="float-left mr-3">
<img src="/docs/images/logo.svg" alt="ClickHouse" />
</a>
<h1 class="h2 mb-0">{{ title }}</h1>
</div>
</div>
<div class="row mb-3">
<div id="selectors" class="col"></div>
</div>
<div class="row mb-3">
<div class="col">
<h2 class="h4 my-3">Relative query processing time <span class="text-muted">(lower is better)</span></h2>
<div id="diagram"></div>
</div>
</div>
<div class="row mb-3">
<div class="col">
<h2 class="h4 my-3">Full results</h2>
<div id="comparison_table" class="overflow-auto"></div>
</div>
</div>
<div class="row">
<div class="col">
<h2 class="h4 my-3">Comments</h2>
<p>Hardware used to run tests: x86_64 AWS m5.8xlarge Ubuntu 20.04.</p>
<p>Old versions did not support these features/syntax: toISOWeek, SUBSTRING, EXTRACT, WITH. The queries were changed accordingly. All version runs use the same queries.</p>
<p>Star Schema Benchmark: Patrick O'Neil, Elizabeth (Betty) O'Neil and Xuedong Chen. "The Star Schema Benchmark," Online Publication of Database Generation program, January 2007. <a href="http://www.cs.umb.edu/~poneil/StarSchemaB.pdf">http://www.cs.umb.edu/~poneil/StarSchemaB.pdf</a></p>
<br/>
</div>
</div>
{% endblock %}

View File

@ -0,0 +1,291 @@
var current_data_size = 0;
var current_systems = [];
var queries =
[
{
"query": "SELECT machine_name, MIN(cpu) AS cpu_min, MAX(cpu) AS cpu_max, AVG(cpu) AS cpu_avg, MIN(net_in) AS net_in_min, MAX(net_in) AS net_in_max, AVG(net_in) AS net_in_avg, MIN(net_out) AS net_out_min, MAX(net_out) AS net_out_max, AVG(net_out) AS net_out_avg FROM ( SELECT machine_name, ifNull(cpu_user, 0.0) AS cpu, ifNull(bytes_in, 0.0) AS net_in, ifNull(bytes_out, 0.0) AS net_out FROM mgbench.logs1 WHERE machine_name IN ('anansi','aragog','urd') AND log_time >= toDateTime('2017-01-11 00:00:00')) AS r GROUP BY machine_name;",
"comment": "Q1.1: What is the CPU/network utilization for each web server since midnight?",
},
{
"query": "SELECT machine_name, log_time FROM mgbench.logs1 WHERE (machine_name LIKE 'cslab%' OR machine_name LIKE 'mslab%') AND load_one IS NULL AND log_time >= toDateTime('2017-01-10 00:00:00') ORDER BY machine_name, log_time;",
"comment": "Q1.2: Which computer lab machines have been offline in the past day?",
},
{
"query": "SELECT dt, hr, AVG(load_fifteen) AS load_fifteen_avg, AVG(load_five) AS load_five_avg, AVG(load_one) AS load_one_avg, AVG(mem_free) AS mem_free_avg, AVG(swap_free) AS swap_free_avg FROM ( SELECT CAST(log_time AS DATE) AS dt, toHour(log_time) AS hr, load_fifteen, load_five, load_one, mem_free, swap_free FROM mgbench.logs1 WHERE machine_name = 'babbage' AND load_fifteen IS NOT NULL AND load_five IS NOT NULL AND load_one IS NOT NULL AND mem_free IS NOT NULL AND swap_free IS NOT NULL AND log_time >= toDateTime('2017-01-01 00:00:00')) AS r GROUP BY dt, hr ORDER BY dt, hr;",
"comment": "Q1.3: What are the hourly average metrics during the past 10 days for a specific workstation?",
},
{
"query": "SELECT machine_name, COUNT(*) AS spikes FROM mgbench.logs1 WHERE machine_group = 'Servers' AND cpu_wio > 0.99 AND log_time >= toDateTime('2016-12-01 00:00:00') AND log_time < toDateTime('2017-01-01 00:00:00') GROUP BY machine_name ORDER BY spikes DESC LIMIT 10;",
"comment": "Q1.4: Over 1 month, how often was each server blocked on disk I/O?",
},
{
"query": "SELECT machine_name, dt, MIN(mem_free) AS mem_free_min FROM ( SELECT machine_name, CAST(log_time AS DATE) AS dt, mem_free FROM mgbench.logs1 WHERE machine_group = 'DMZ' AND mem_free IS NOT NULL ) AS r GROUP BY machine_name, dt HAVING MIN(mem_free) < 10000 ORDER BY machine_name, dt;",
"comment": "Q1.5: Which externally reachable VMs have run low on memory?",
},
{
"query": "SELECT dt, hr, SUM(net_in) AS net_in_sum, SUM(net_out) AS net_out_sum, SUM(net_in) + SUM(net_out) AS both_sum FROM ( SELECT CAST(log_time AS DATE) AS dt, toHour(log_time) AS hr, ifNull(bytes_in, 0.0) / 1000000000.0 AS net_in, ifNull(bytes_out, 0.0) / 1000000000.0 AS net_out FROM mgbench.logs1 WHERE machine_name IN ('allsorts','andes','bigred','blackjack','bonbon','cadbury','chiclets','cotton','crows','dove','fireball','hearts','huey','lindt','milkduds','milkyway','mnm','necco','nerds','orbit','peeps','poprocks','razzles','runts','smarties','smuggler','spree','stride','tootsie','trident','wrigley','york') ) AS r GROUP BY dt, hr ORDER BY both_sum DESC LIMIT 10;",
"comment": "Q1.6: What is the total hourly network traffic across all file servers?",
},
{
"query": "SELECT * FROM mgbench.logs2 WHERE status_code >= 500 AND log_time >= toDateTime('2012-12-18 00:00:00') ORDER BY log_time;",
"comment": "Q2.1: Which requests have caused server errors within the past 2 weeks?",
},
{
"query": "SELECT * FROM mgbench.logs2 WHERE status_code >= 200 AND status_code < 300 AND request LIKE '%/etc/passwd%' AND log_time >= toDateTime('2012-05-06 00:00:00') AND log_time < toDateTime('2012-05-20 00:00:00');",
"comment": "Q2.3: What was the average path depth for top-level requests in the past month?",
},
{
"query": "SELECT top_level, AVG(length(request) - length(replaceOne(request, '/',''))) AS depth_avg FROM ( SELECT substring(request, 1, len) AS top_level, request FROM ( SELECT position('/', substring(request, 2)) AS len, request FROM mgbench.logs2 WHERE status_code >= 200 AND status_code < 300 AND log_time >= toDateTime('2012-12-01 00:00:00')) AS r WHERE len > 0 ) AS s WHERE top_level IN ('/about','/courses','/degrees','/events','/grad','/industry','/news','/people','/publications','/research','/teaching','/ugrad') GROUP BY top_level ORDER BY top_level;",
"comment": "Q2.2: During a specific 2-week period, was the user password file leaked?",
},
{
"query": "SELECT client_ip, COUNT(*) AS num_requests FROM mgbench.logs2 WHERE log_time >= toDateTime('2012-10-01 00:00:00') GROUP BY client_ip HAVING COUNT(*) >= 100000 ORDER BY num_requests DESC;",
"comment": "Q2.4: During the last 3 months, which clients have made an excessive number of requests?",
},
{
"query": "SELECT dt, COUNT(DISTINCT client_ip) FROM ( SELECT CAST(log_time AS DATE) AS dt, client_ip FROM mgbench.logs2) AS r GROUP BY dt ORDER BY dt;",
"comment": "Q2.5: What are the daily unique visitors?",
},
{
"query": "SELECT AVG(transfer) / 125000000.0 AS transfer_avg, MAX(transfer) / 125000000.0 AS transfer_max FROM ( SELECT log_time, SUM(object_size) AS transfer FROM mgbench.logs2 GROUP BY log_time) AS r;",
"comment": "Q2.6: What are the average and maximum data transfer rates (Gbps)?",
},
{
"query": "SELECT * FROM mgbench.logs3 WHERE event_type = 'temperature' AND event_value <= 32.0 AND log_time >= '2019-11-29 17:00:00';",
"comment": "Q3.1: Did the indoor temperature reach freezing over the weekend?",
},
{
"query": "SELECT device_name, device_floor, COUNT(*) AS ct FROM mgbench.logs3 WHERE event_type = 'door_open' AND log_time >= '2019-06-01 00:00:00' GROUP BY device_name, device_floor ORDER BY ct DESC;",
"comment": "Q3.4: Over the past 6 months, how frequently were each door opened?",
},
{
"query": "SELECT yr, mo, SUM(coffee_hourly_avg) AS coffee_monthly_sum, AVG(coffee_hourly_avg) AS coffee_monthly_avg, SUM(printer_hourly_avg) AS printer_monthly_sum, AVG(printer_hourly_avg) AS printer_monthly_avg, SUM(projector_hourly_avg) AS projector_monthly_sum, AVG(projector_hourly_avg) AS projector_monthly_avg, SUM(vending_hourly_avg) AS vending_monthly_sum, AVG(vending_hourly_avg) AS vending_monthly_avg FROM ( SELECT dt, yr, mo, hr, AVG(coffee) AS coffee_hourly_avg, AVG(printer) AS printer_hourly_avg, AVG(projector) AS projector_hourly_avg, AVG(vending) AS vending_hourly_avg FROM ( SELECT CAST(log_time AS DATE) AS dt, toYear(log_time) AS yr, EXTRACT(MONTH FROM log_time) AS mo, toHour(log_time) AS hr, CASE WHEN device_name LIKE 'coffee%' THEN event_value END AS coffee, CASE WHEN device_name LIKE 'printer%' THEN event_value END AS printer, CASE WHEN device_name LIKE 'projector%' THEN event_value END AS projector, CASE WHEN device_name LIKE 'vending%' THEN event_value END AS vending FROM mgbench.logs3 WHERE device_type = 'meter' ) AS r GROUP BY dt, yr, mo, hr ) AS s GROUP BY yr, mo ORDER BY yr, mo;",
"comment": " -- Q3.6: For each device category, what are the monthly power consumption metrics?",
},
{
"query": "SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE F_YEAR = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25;",
"comment": " -- Q1.1",
},
{
"query": "SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE toYYYYMM(LO_ORDERDATE) = 199401 AND LO_DISCOUNT BETWEEN 4 AND 6 AND LO_QUANTITY BETWEEN 26 AND 35;",
"comment": " -- Q1.2",
},
{
"query": "SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE toRelativeWeekNum(LO_ORDERDATE) - toRelativeWeekNum(toDate('1994-01-01')) = 6 AND F_YEAR = 1994 AND LO_DISCOUNT BETWEEN 5 AND 7 AND LO_QUANTITY BETWEEN 26 AND 35;",
"comment": " -- Q1.3",
},
{
"query": "SELECT sum(LO_REVENUE), F_YEAR AS year, P_BRAND FROM lineorder_flat WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' GROUP BY year, P_BRAND ORDER BY year, P_BRAND;",
"comment": " -- Q2.1",
},
{
"query": "SELECT sum(LO_REVENUE), F_YEAR AS year, P_BRAND FROM lineorder_flat WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' GROUP BY year, P_BRAND ORDER BY year, P_BRAND;",
"comment": " -- Q2.2",
},
{
"query": "SELECT sum(LO_REVENUE), F_YEAR AS year, P_BRAND FROM lineorder_flat WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' GROUP BY year, P_BRAND ORDER BY year, P_BRAND;",
"comment": " -- Q2.3",
},
{
"query": "SELECT C_NATION, S_NATION, F_YEAR AS year, sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 GROUP BY C_NATION, S_NATION, year ORDER BY year ASC, revenue DESC;",
"comment": " -- Q3.1",
},
{
"query": "SELECT C_CITY, S_CITY, F_YEAR AS year, sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 GROUP BY C_CITY, S_CITY, year ORDER BY year ASC, revenue DESC;",
"comment": " -- Q3.2",
},
{
"query": "SELECT C_CITY, S_CITY, F_YEAR AS year, sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 GROUP BY C_CITY, S_CITY, year ORDER BY year ASC, revenue DESC;",
"comment": " -- Q3.3",
},
{
"query": "SELECT C_CITY, S_CITY, F_YEAR AS year, sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND toYYYYMM(LO_ORDERDATE) = 199712 GROUP BY C_CITY, S_CITY, year ORDER BY year ASC, revenue DESC;",
"comment": " -- Q3.4",
},
{
"query": "SELECT F_YEAR AS year, C_NATION, sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year, C_NATION ORDER BY year ASC, C_NATION ASC;",
"comment": " -- Q4.1",
},
{
"query": "SELECT F_YEAR AS year, S_NATION, P_CATEGORY, sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year, S_NATION, P_CATEGORY ORDER BY year ASC, S_NATION ASC, P_CATEGORY ASC;",
"comment": " -- Q4.2",
},
{
"query": "SELECT F_YEAR AS year, S_CITY, P_BRAND, sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year, S_CITY, P_BRAND ORDER BY year ASC, S_CITY ASC, P_BRAND ASC;",
"comment": " -- Q4.",
},
{
"query": "SELECT count() FROM hits",
"comment": "",
},
{
"query": "SELECT count() FROM hits WHERE AdvEngineID != 0",
"comment": "",
},
{
"query": "SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM hits",
"comment": "",
},
{
"query": "SELECT sum(UserID) FROM hits",
"comment": "",
},
{
"query": "SELECT uniq(UserID) FROM hits",
"comment": "",
},
{
"query": "SELECT uniq(SearchPhrase) FROM hits",
"comment": "",
},
{
"query": "SELECT min(EventDate), max(EventDate) FROM hits",
"comment": "",
},
{
"query": "SELECT AdvEngineID, count() FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count() DESC",
"comment": "",
},
{
"query": "SELECT RegionID, uniq(UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT RegionID, sum(AdvEngineID), count() AS c, avg(ResolutionWidth), uniq(UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT MobilePhoneModel, uniq(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT MobilePhone, MobilePhoneModel, uniq(UserID) AS u FROM hits WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase, count() AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase, uniq(UserID) AS u FROM hits WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchEngineID, SearchPhrase, count() AS c FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT UserID, count() FROM hits GROUP BY UserID ORDER BY count() DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT UserID, SearchPhrase, count() FROM hits GROUP BY UserID, SearchPhrase ORDER BY count() DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT UserID, SearchPhrase, count() FROM hits GROUP BY UserID, SearchPhrase LIMIT 10",
"comment": "",
},
{
"query": "SELECT UserID, toMinute(EventTime) AS m, SearchPhrase, count() FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count() DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT UserID FROM hits WHERE UserID = 12345678901234567890",
"comment": "",
},
{
"query": "SELECT count() FROM hits WHERE URL LIKE '%metrika%'",
"comment": "",
},
{
"query": "SELECT SearchPhrase, any(URL), count() AS c FROM hits WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase, any(URL), any(Title), count() AS c, uniq(UserID) FROM hits WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT * FROM hits WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10",
"comment": "",
},
{
"query": "SELECT SearchPhrase FROM hits WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10",
"comment": "",
},
{
"query": "SELECT CounterID, avg(length(URL)) AS l, count() AS c FROM hits WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25",
"comment": "",
},
{
"query": "SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count() AS c, any(Referer) FROM hits WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25",
"comment": "",
},
{
"query": "SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits",
"comment": "",
},
{
"query": "SELECT SearchEngineID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM hits WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT URL, count() AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT 1, URL, count() AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT ClientIP AS x, x - 1, x - 2, x - 3, count() AS c FROM hits GROUP BY x, x - 1, x - 2, x - 3 ORDER BY c DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT URL, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT Title, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews DESC LIMIT 10",
"comment": "",
},
{
"query": "SELECT URL, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000",
"comment": "",
},
{
"query": "SELECT TraficSourceID, SearchEngineID, AdvEngineID, ((SearchEngineID = 0 AND AdvEngineID = 0) ? Referer : '') AS Src, URL AS Dst, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000",
"comment": "",
},
{
"query": "SELECT URLHash, EventDate, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = halfMD5('http://yandex.ru/') GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100",
"comment": "",
},
{
"query": "SELECT WindowClientWidth, WindowClientHeight, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = halfMD5('http://yandex.ru/') GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;",
"comment": "",
},
{
"query": "SELECT toStartOfMinute(EventTime) AS Minute, count() AS PageViews FROM hits WHERE CounterID = 62 AND EventDate >= toDate('2013-07-01') AND EventDate <= toDate('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;",
"comment": "",
}
];

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-04-16 1.1.54378",
"system_full": "ClickHouse 1.1.54378 2018-04-16",
"version": "1.1.54378",
"kind": "",
"comments": "",
"result":
[
[0.099, 0.017, 0.014],
[0.176, 0.005, 0.005],
[0.818, 0.018, 0.016],
[0.163, 0.011, 0.011],
[0.264, 0.039, 0.031],
[1.025, 0.026, 0.024],
[0.076, 0.004, 0.003],
[0.335, 0.018, 0.018],
[0.511, 0.034, 0.034],
[0.541, 0.090, 0.088],
[1.881, 0.506, 0.497],
[1.515, 0.546, 0.495],
[0.033, 0.003, 0.003],
[0.034, 0.016, 0.016],
[0.123, 0.105, 0.103],
[3.318, 0.090, 0.088],
[2.609, 0.146, 0.145],
[3.887, 0.067, 0.066],
[1.417, 0.067, 0.065],
[7.034, 0.332, 0.328],
[4.289, 0.177, 0.176],
[1.773, 0.204, 0.214],
[63.407, 2.564, 2.490],
[42.496, 2.011, 1.993],
[4.502, 0.218, 0.216],
[1.952, 0.120, 0.121],
[0.474, 0.059, 0.062],
[6.655, 0.106, 0.101],
[0.044, 0.010, 0.010],
[0.042, 0.007, 0.007],
[0.480, 0.060, 0.056],
[1.684, 0.042, 0.039],
[1.721, 0.091, 0.091],
[2.587, 0.207, 0.206],
[0.067, 0.044, 0.044],
[0.027, 0.008, 0.007],
[2.282, 0.428, 0.425],
[2.273, 0.501, 0.500],
[1.072, 0.133, 0.133],
[1.717, 0.163, 0.163],
[2.604, 0.431, 0.418],
[3.794, 0.546, 0.545],
[2.398, 0.538, 0.585],
[1.272, 0.564, 0.534],
[4.313, 1.445, 1.481],
[3.711, 0.860, 0.831],
[8.169, 3.717, 3.386],
[0.962, 0.029, 0.028],
[18.979, 0.538, 0.533],
[20.836, 0.642, 0.667],
[38.657, 1.461, 1.388],
[29.873, 0.735, 0.675],
[4.756, 0.189, 0.188],
[2.304, 0.161, 0.161],
[5.272, 0.196, 0.195],
[18.616, 0.557, 0.552],
[15.437, 1.136, 1.125],
[2.560, 2.523, 2.497],
[3.912, 0.561, 0.557],
[9.879, 0.774, 0.793],
[9.665, 4.632, 4.677],
[18.892, 2.283, 2.142],
[18.894, 2.264, 2.160],
[0.953, 0.921, 0.915],
[0.099, 0.075, 0.076],
[0.055, 0.038, 0.037],
[0.055, 0.033, 0.031],
[0.202, 0.159, 0.159],
[0.038, 0.011, 0.011],
[0.023, 0.009, 0.009],
[0.012, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-07-12 1.1.54394",
"system_full": "ClickHouse 1.1.54394 2018-07-12",
"version": "1.1.54394",
"kind": "",
"comments": "",
"result":
[
[0.141, 0.016, 0.014],
[0.152, 0.005, 0.005],
[0.820, 0.018, 0.016],
[0.161, 0.011, 0.011],
[0.266, 0.033, 0.032],
[1.024, 0.026, 0.024],
[0.076, 0.004, 0.003],
[0.335, 0.017, 0.017],
[0.508, 0.033, 0.032],
[0.553, 0.090, 0.090],
[1.839, 0.501, 0.497],
[1.519, 0.490, 0.513],
[0.035, 0.003, 0.003],
[0.033, 0.015, 0.015],
[0.116, 0.135, 0.105],
[3.324, 0.088, 0.085],
[2.614, 0.145, 0.144],
[3.882, 0.068, 0.066],
[1.421, 0.067, 0.066],
[7.042, 0.315, 0.312],
[4.326, 0.170, 0.168],
[1.810, 0.205, 0.198],
[63.409, 2.479, 2.463],
[42.496, 1.974, 1.963],
[4.501, 0.223, 0.211],
[1.947, 0.115, 0.114],
[0.488, 0.058, 0.060],
[6.636, 0.103, 0.099],
[0.050, 0.010, 0.010],
[0.043, 0.007, 0.006],
[0.482, 0.055, 0.055],
[1.669, 0.040, 0.041],
[1.713, 0.093, 0.091],
[2.563, 0.201, 0.202],
[0.064, 0.041, 0.041],
[0.028, 0.007, 0.007],
[2.305, 0.425, 0.424],
[2.270, 0.502, 0.506],
[1.070, 0.139, 0.138],
[1.689, 0.166, 0.165],
[2.616, 0.447, 0.459],
[3.835, 0.562, 0.540],
[2.452, 0.581, 0.560],
[1.282, 0.540, 0.544],
[4.321, 1.565, 1.467],
[3.678, 0.811, 0.812],
[8.241, 3.565, 3.693],
[0.961, 0.028, 0.027],
[18.976, 0.509, 0.482],
[20.838, 0.601, 0.737],
[38.809, 1.514, 1.353],
[29.889, 0.690, 0.628],
[4.546, 0.190, 0.188],
[2.314, 0.163, 0.165],
[5.264, 0.197, 0.193],
[18.615, 0.527, 0.516],
[15.439, 1.156, 1.138],
[2.552, 2.521, 2.509],
[3.918, 0.566, 0.557],
[9.883, 0.790, 0.794],
[9.662, 4.592, 4.830],
[18.976, 2.262, 2.254],
[18.926, 2.132, 2.173],
[0.945, 0.918, 0.921],
[0.099, 0.074, 0.074],
[0.053, 0.038, 0.038],
[0.052, 0.030, 0.030],
[0.199, 0.156, 0.155],
[0.038, 0.011, 0.011],
[0.023, 0.009, 0.009],
[0.011, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-07-20 18.1",
"system_full": "ClickHouse 18.1.0 2018-07-20",
"version": "18.1.0",
"kind": "",
"comments": "",
"result":
[
[0.130, 0.015, 0.013],
[0.139, 0.005, 0.005],
[0.822, 0.017, 0.016],
[0.160, 0.011, 0.010],
[0.263, 0.051, 0.059],
[0.994, 0.025, 0.024],
[0.076, 0.004, 0.004],
[0.334, 0.017, 0.017],
[0.507, 0.031, 0.030],
[0.553, 0.087, 0.084],
[1.867, 0.486, 0.484],
[1.528, 0.493, 0.493],
[0.034, 0.003, 0.003],
[0.033, 0.015, 0.015],
[0.117, 0.099, 0.099],
[3.353, 0.087, 0.086],
[2.632, 0.145, 0.144],
[3.913, 0.067, 0.065],
[1.433, 0.063, 0.062],
[7.058, 0.303, 0.301],
[4.355, 0.163, 0.161],
[1.826, 0.187, 0.212],
[63.427, 2.395, 2.319],
[42.481, 1.974, 1.956],
[4.485, 0.213, 0.218],
[1.945, 0.123, 0.116],
[0.445, 0.056, 0.055],
[6.633, 0.102, 0.099],
[0.047, 0.010, 0.010],
[0.044, 0.007, 0.007],
[0.479, 0.055, 0.055],
[1.695, 0.040, 0.039],
[1.731, 0.092, 0.095],
[2.547, 0.197, 0.192],
[0.061, 0.041, 0.041],
[0.025, 0.007, 0.007],
[2.289, 0.429, 0.426],
[2.240, 0.506, 0.502],
[1.062, 0.128, 0.126],
[1.688, 0.155, 0.155],
[2.571, 0.456, 0.423],
[3.814, 0.538, 0.546],
[2.467, 0.563, 0.531],
[1.263, 0.545, 0.553],
[4.303, 1.526, 1.539],
[3.667, 0.911, 0.922],
[8.280, 3.588, 3.559],
[0.938, 0.028, 0.027],
[18.975, 0.519, 0.513],
[20.851, 0.588, 0.724],
[38.765, 1.508, 1.345],
[29.904, 0.684, 0.648],
[4.591, 0.180, 0.175],
[2.350, 0.151, 0.150],
[5.295, 0.185, 0.185],
[18.635, 0.520, 0.512],
[15.431, 1.169, 1.144],
[2.543, 2.542, 2.504],
[3.918, 0.545, 0.540],
[9.879, 0.781, 0.765],
[9.687, 4.567, 4.636],
[18.949, 2.314, 2.136],
[18.946, 2.168, 2.227],
[0.950, 0.902, 0.934],
[0.098, 0.073, 0.075],
[0.056, 0.038, 0.037],
[0.055, 0.030, 0.030],
[0.205, 0.157, 0.155],
[0.037, 0.011, 0.011],
[0.022, 0.009, 0.009],
[0.011, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-07-28 18.4",
"system_full": "ClickHouse 18.4.0 2018-07-28",
"version": "18.4.0",
"kind": "",
"comments": "",
"result":
[
[0.145, 0.015, 0.013],
[0.042, 0.006, 0.005],
[0.662, 0.016, 0.015],
[0.161, 0.011, 0.010],
[0.268, 0.030, 0.029],
[1.026, 0.025, 0.023],
[0.076, 0.004, 0.003],
[0.334, 0.018, 0.018],
[0.501, 0.031, 0.031],
[0.551, 0.089, 0.086],
[1.893, 0.495, 0.489],
[1.521, 0.506, 0.479],
[0.033, 0.003, 0.003],
[0.032, 0.015, 0.015],
[0.163, 0.131, 0.103],
[3.347, 0.090, 0.087],
[2.623, 0.145, 0.146],
[3.895, 0.068, 0.067],
[1.421, 0.063, 0.064],
[7.053, 0.307, 0.304],
[4.320, 0.159, 0.159],
[1.811, 0.194, 0.200],
[63.418, 2.390, 2.315],
[42.482, 1.944, 1.934],
[4.507, 0.213, 0.210],
[1.947, 0.116, 0.119],
[0.481, 0.057, 0.056],
[6.653, 0.100, 0.098],
[0.045, 0.010, 0.010],
[0.043, 0.007, 0.007],
[0.481, 0.054, 0.055],
[1.683, 0.043, 0.044],
[1.732, 0.092, 0.091],
[2.570, 0.192, 0.193],
[0.056, 0.039, 0.039],
[0.025, 0.007, 0.007],
[2.313, 0.428, 0.426],
[2.253, 0.497, 0.498],
[1.076, 0.121, 0.121],
[1.728, 0.148, 0.149],
[2.579, 0.444, 0.414],
[3.796, 0.532, 0.531],
[2.427, 0.556, 0.563],
[1.267, 0.544, 0.542],
[4.314, 1.538, 1.516],
[3.662, 0.804, 0.869],
[8.244, 3.696, 3.698],
[0.956, 0.028, 0.027],
[18.975, 0.514, 0.507],
[20.853, 0.583, 0.726],
[38.834, 1.380, 1.363],
[29.884, 0.675, 0.640],
[4.554, 0.175, 0.173],
[2.360, 0.147, 0.145],
[5.300, 0.179, 0.179],
[18.661, 0.514, 0.505],
[15.432, 1.161, 1.153],
[2.528, 2.542, 2.512],
[3.929, 0.543, 0.533],
[9.838, 0.765, 0.761],
[9.589, 4.506, 4.642],
[18.961, 2.245, 2.185],
[18.935, 2.127, 2.154],
[0.950, 0.913, 0.889],
[0.098, 0.072, 0.072],
[0.054, 0.037, 0.037],
[0.054, 0.030, 0.030],
[0.203, 0.153, 0.158],
[0.037, 0.011, 0.011],
[0.023, 0.009, 0.009],
[0.012, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-07-31 18.5",
"system_full": "ClickHouse 18.5.1 2018-07-31",
"version": "18.5.1",
"kind": "",
"comments": "",
"result":
[
[0.130, 0.015, 0.013],
[0.147, 0.005, 0.005],
[0.819, 0.017, 0.015],
[0.162, 0.011, 0.011],
[0.263, 0.040, 0.054],
[1.003, 0.025, 0.023],
[0.076, 0.004, 0.003],
[0.335, 0.017, 0.017],
[0.509, 0.032, 0.031],
[0.551, 0.095, 0.087],
[1.829, 0.489, 0.496],
[1.510, 0.518, 0.492],
[0.035, 0.003, 0.003],
[0.032, 0.015, 0.015],
[0.122, 0.127, 0.101],
[3.329, 0.090, 0.087],
[2.609, 0.143, 0.141],
[3.895, 0.067, 0.066],
[1.433, 0.064, 0.064],
[7.038, 0.307, 0.305],
[4.335, 0.160, 0.160],
[1.817, 0.216, 0.214],
[63.378, 2.378, 2.313],
[42.494, 1.940, 1.929],
[4.510, 0.212, 0.209],
[1.955, 0.119, 0.117],
[0.496, 0.058, 0.056],
[6.639, 0.100, 0.097],
[0.046, 0.010, 0.010],
[0.044, 0.007, 0.006],
[0.525, 0.055, 0.056],
[1.739, 0.043, 0.041],
[1.749, 0.091, 0.091],
[2.566, 0.193, 0.189],
[0.061, 0.041, 0.041],
[0.026, 0.007, 0.007],
[2.331, 0.427, 0.426],
[2.279, 0.504, 0.502],
[1.054, 0.122, 0.121],
[1.735, 0.149, 0.150],
[2.649, 0.426, 0.415],
[3.799, 0.552, 0.564],
[2.437, 0.573, 0.522],
[1.255, 0.532, 0.556],
[4.340, 1.534, 1.446],
[3.647, 0.811, 0.846],
[8.212, 3.519, 3.542],
[0.951, 0.028, 0.027],
[18.978, 0.661, 0.508],
[20.848, 0.583, 0.575],
[38.808, 1.432, 1.348],
[29.875, 0.679, 0.651],
[4.778, 0.176, 0.174],
[2.370, 0.148, 0.146],
[5.302, 0.186, 0.178],
[18.666, 0.522, 0.514],
[15.419, 1.157, 1.141],
[2.527, 2.526, 2.513],
[3.948, 0.539, 0.544],
[9.857, 0.772, 0.750],
[9.827, 4.565, 4.514],
[18.957, 2.301, 2.151],
[18.952, 2.147, 2.239],
[0.940, 0.897, 0.907],
[0.099, 0.072, 0.073],
[0.055, 0.037, 0.037],
[0.054, 0.030, 0.029],
[0.193, 0.155, 0.152],
[0.035, 0.011, 0.010],
[0.023, 0.009, 0.009],
[0.012, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-08-01 18.6",
"system_full": "ClickHouse 18.6.0 2018-08-01",
"version": "18.6.0",
"kind": "",
"comments": "",
"result":
[
[0.125, 0.014, 0.013],
[0.156, 0.005, 0.005],
[0.818, 0.016, 0.015],
[0.162, 0.011, 0.011],
[0.265, 0.044, 0.031],
[1.023, 0.025, 0.023],
[0.076, 0.004, 0.004],
[0.335, 0.019, 0.017],
[0.508, 0.032, 0.031],
[0.551, 0.088, 0.086],
[1.844, 0.493, 0.491],
[1.520, 0.485, 0.492],
[0.035, 0.003, 0.003],
[0.033, 0.015, 0.015],
[0.155, 0.109, 0.129],
[3.314, 0.090, 0.088],
[2.611, 0.144, 0.142],
[3.902, 0.066, 0.065],
[1.423, 0.064, 0.062],
[7.049, 0.304, 0.330],
[4.330, 0.159, 0.158],
[1.834, 0.193, 0.176],
[63.516, 2.328, 2.310],
[42.645, 1.945, 1.913],
[4.521, 0.212, 0.217],
[1.923, 0.112, 0.114],
[0.479, 0.056, 0.055],
[6.627, 0.101, 0.097],
[0.047, 0.010, 0.009],
[0.043, 0.007, 0.006],
[0.482, 0.058, 0.055],
[1.693, 0.043, 0.043],
[1.744, 0.098, 0.093],
[2.565, 0.192, 0.192],
[0.059, 0.040, 0.040],
[0.026, 0.007, 0.007],
[2.325, 0.425, 0.426],
[2.265, 0.501, 0.499],
[1.043, 0.122, 0.122],
[1.718, 0.151, 0.150],
[2.627, 0.425, 0.441],
[3.801, 0.530, 0.528],
[2.398, 0.525, 0.520],
[1.238, 0.523, 0.543],
[4.345, 1.505, 1.513],
[3.667, 0.851, 0.852],
[8.282, 3.515, 3.493],
[0.962, 0.028, 0.028],
[18.978, 0.518, 0.514],
[20.849, 0.814, 0.578],
[38.796, 1.382, 1.331],
[29.874, 0.665, 0.650],
[4.545, 0.181, 0.174],
[2.356, 0.147, 0.145],
[5.302, 0.180, 0.179],
[18.680, 0.516, 0.509],
[15.430, 1.162, 1.158],
[2.515, 2.502, 2.538],
[3.927, 0.533, 0.525],
[9.878, 0.769, 0.767],
[9.608, 4.694, 4.443],
[19.021, 2.138, 2.202],
[18.958, 2.174, 2.204],
[0.956, 0.899, 0.929],
[0.099, 0.074, 0.073],
[0.055, 0.037, 0.037],
[0.051, 0.031, 0.030],
[0.203, 0.157, 0.156],
[0.040, 0.011, 0.011],
[0.024, 0.009, 0.009],
[0.012, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-08-13 18.10",
"system_full": "ClickHouse 18.10.3 2018-08-13",
"version": "18.10.3",
"kind": "",
"comments": "",
"result":
[
[0.138, 0.015, 0.013],
[0.119, 0.006, 0.006],
[0.820, 0.017, 0.016],
[0.161, 0.011, 0.011],
[0.270, 0.044, 0.046],
[1.009, 0.025, 0.024],
[0.076, 0.004, 0.004],
[0.335, 0.017, 0.017],
[0.505, 0.032, 0.030],
[0.554, 0.087, 0.090],
[1.887, 0.524, 0.504],
[1.530, 0.485, 0.490],
[0.034, 0.003, 0.003],
[0.032, 0.022, 0.020],
[0.174, 0.132, 0.134],
[3.331, 0.091, 0.087],
[2.632, 0.154, 0.152],
[3.881, 0.074, 0.072],
[1.421, 0.067, 0.067],
[7.055, 0.298, 0.291],
[4.370, 0.161, 0.160],
[1.828, 0.417, 0.423],
[63.020, 2.017, 1.993],
[42.473, 1.863, 1.855],
[4.489, 0.220, 0.215],
[1.947, 0.158, 0.208],
[0.357, 0.078, 0.092],
[6.607, 0.115, 0.103],
[0.043, 0.011, 0.010],
[0.043, 0.008, 0.008],
[0.483, 0.060, 0.061],
[1.687, 0.042, 0.042],
[1.732, 0.093, 0.093],
[2.572, 0.194, 0.192],
[0.066, 0.048, 0.048],
[0.028, 0.008, 0.008],
[2.290, 0.435, 0.438],
[2.276, 0.503, 0.506],
[1.054, 0.127, 0.126],
[1.702, 0.160, 0.165],
[2.545, 0.429, 0.464],
[3.846, 0.551, 0.535],
[2.413, 0.575, 0.554],
[1.244, 0.539, 0.582],
[4.310, 1.570, 1.539],
[3.635, 0.910, 0.868],
[8.212, 4.811, 4.268],
[0.947, 0.028, 0.027],
[18.972, 0.518, 0.506],
[20.843, 0.588, 0.572],
[38.776, 1.377, 1.363],
[29.917, 0.670, 0.630],
[4.779, 0.186, 0.182],
[2.330, 0.163, 0.153],
[5.283, 0.193, 0.187],
[18.637, 0.544, 0.518],
[15.417, 1.178, 1.161],
[2.396, 2.348, 2.330],
[3.916, 0.537, 0.539],
[9.855, 0.752, 0.735],
[9.330, 4.220, 4.258],
[18.911, 2.108, 2.111],
[18.849, 2.087, 2.145],
[0.942, 0.885, 0.891],
[0.102, 0.077, 0.073],
[0.062, 0.038, 0.038],
[0.052, 0.030, 0.030],
[0.251, 0.173, 0.193],
[0.035, 0.011, 0.011],
[0.023, 0.009, 0.009],
[0.011, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-09-16 18.12",
"system_full": "ClickHouse 18.12.17 2018-09-16",
"version": "18.12.17",
"kind": "",
"comments": "",
"result":
[
[0.132, 0.018, 0.014],
[0.093, 0.006, 0.006],
[0.818, 0.020, 0.017],
[0.164, 0.012, 0.012],
[0.262, 0.042, 0.044],
[0.988, 0.028, 0.026],
[0.076, 0.004, 0.004],
[0.337, 0.020, 0.021],
[0.506, 0.038, 0.036],
[0.544, 0.098, 0.100],
[1.844, 0.533, 0.538],
[1.497, 0.510, 0.509],
[0.034, 0.004, 0.004],
[0.036, 0.016, 0.016],
[0.142, 0.148, 0.131],
[3.307, 0.098, 0.096],
[2.591, 0.160, 0.157],
[3.871, 0.082, 0.079],
[1.398, 0.074, 0.074],
[7.032, 0.349, 0.336],
[4.265, 0.170, 0.165],
[1.831, 0.221, 0.214],
[63.399, 2.641, 2.602],
[42.509, 2.107, 2.060],
[4.499, 0.233, 0.229],
[1.916, 0.146, 0.140],
[0.418, 0.064, 0.071],
[6.635, 0.123, 0.119],
[0.046, 0.011, 0.011],
[0.037, 0.008, 0.008],
[0.485, 0.062, 0.067],
[1.641, 0.044, 0.043],
[1.696, 0.097, 0.092],
[2.573, 0.200, 0.196],
[0.067, 0.046, 0.046],
[0.032, 0.010, 0.009],
[2.249, 0.429, 0.431],
[2.248, 0.513, 0.508],
[1.058, 0.132, 0.132],
[1.720, 0.162, 0.159],
[2.538, 0.437, 0.431],
[3.844, 0.542, 0.544],
[2.392, 0.533, 0.540],
[1.258, 0.541, 0.530],
[4.264, 1.392, 1.386],
[3.673, 0.799, 0.787],
[8.001, 2.947, 2.931],
[0.935, 0.060, 0.028],
[18.966, 0.610, 0.583],
[20.808, 0.629, 0.617],
[38.800, 1.481, 1.506],
[29.883, 0.663, 0.637],
[4.797, 0.190, 0.188],
[2.316, 0.167, 0.162],
[5.250, 0.199, 0.195],
[18.608, 0.545, 0.518],
[15.452, 1.180, 1.163],
[2.484, 2.458, 2.456],
[3.906, 0.493, 0.500],
[9.845, 0.714, 0.712],
[9.286, 4.143, 4.528],
[18.894, 2.139, 2.143],
[18.917, 2.145, 2.108],
[0.943, 0.872, 0.896],
[0.104, 0.081, 0.079],
[0.064, 0.045, 0.041],
[0.059, 0.036, 0.034],
[0.244, 0.183, 0.183],
[0.040, 0.012, 0.012],
[0.026, 0.011, 0.010],
[0.013, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-12-19 18.14",
"system_full": "ClickHouse 18.14.19 2018-12-19",
"version": "18.14.19",
"kind": "",
"comments": "",
"result":
[
[0.133, 0.016, 0.015],
[0.157, 0.006, 0.006],
[0.816, 0.018, 0.017],
[0.164, 0.012, 0.012],
[0.263, 0.036, 0.040],
[1.029, 0.027, 0.025],
[0.076, 0.004, 0.004],
[0.337, 0.018, 0.018],
[0.503, 0.034, 0.032],
[0.557, 0.088, 0.091],
[1.912, 0.517, 0.510],
[1.523, 0.506, 0.485],
[0.038, 0.004, 0.004],
[0.035, 0.015, 0.015],
[0.141, 0.109, 0.141],
[3.336, 0.088, 0.086],
[2.626, 0.147, 0.144],
[3.906, 0.068, 0.065],
[1.436, 0.067, 0.065],
[7.020, 0.316, 0.300],
[4.302, 0.169, 0.166],
[1.817, 0.197, 0.192],
[63.459, 2.171, 2.150],
[42.546, 1.915, 1.894],
[4.504, 0.219, 0.214],
[1.864, 0.155, 0.146],
[0.428, 0.074, 0.069],
[6.621, 0.111, 0.106],
[0.043, 0.010, 0.009],
[0.044, 0.008, 0.007],
[0.480, 0.059, 0.060],
[1.686, 0.041, 0.040],
[1.725, 0.090, 0.091],
[2.558, 0.195, 0.191],
[0.063, 0.046, 0.043],
[0.033, 0.008, 0.008],
[2.275, 0.434, 0.422],
[2.260, 0.507, 0.511],
[1.074, 0.123, 0.122],
[1.718, 0.151, 0.150],
[2.602, 0.429, 0.418],
[3.819, 0.552, 0.535],
[2.433, 0.508, 0.515],
[1.265, 0.543, 0.536],
[4.278, 1.386, 1.376],
[3.658, 0.865, 0.774],
[7.959, 2.910, 2.931],
[0.948, 0.033, 0.060],
[18.942, 0.572, 0.552],
[20.834, 0.579, 0.570],
[38.782, 1.378, 1.348],
[29.262, 0.607, 0.636],
[4.575, 0.185, 0.178],
[2.330, 0.158, 0.153],
[5.290, 0.193, 0.187],
[18.670, 0.513, 0.505],
[15.443, 1.150, 1.134],
[2.452, 2.435, 2.429],
[3.926, 0.474, 0.477],
[9.856, 0.684, 0.678],
[9.269, 4.033, 4.061],
[18.931, 2.123, 2.073],
[18.914, 2.101, 2.123],
[0.910, 0.860, 0.860],
[0.104, 0.076, 0.078],
[0.060, 0.040, 0.038],
[0.057, 0.033, 0.035],
[0.232, 0.173, 0.162],
[0.035, 0.025, 0.012],
[0.024, 0.013, 0.010],
[0.012, 0.003, 0.003]
]
}
]

View File

@ -0,0 +1,85 @@
[
{
"system": "2018-12-21 18.16",
"system_full": "ClickHouse 18.16.1 2018-12-21",
"version": "18.16.1",
"kind": "",
"comments": "",
"result":
[
[0.133, 0.194, 0.016],
[0.215, 0.082, 0.007],
[0.818, 0.050, 0.021],
[0.156, 0.066, 0.016],
[0.269, 0.038, 0.045],
[1.159, 0.032, 0.029],
[0.086, 0.008, 0.008],
[0.335, 0.065, 0.024],
[0.488, 0.040, 0.040],
[0.550, 0.097, 0.097],
[1.906, 0.593, 0.584],
[1.503, 0.533, 0.522],
[0.038, 0.005, 0.005],
[0.036, 0.023, 0.017],
[0.185, 0.198, 0.162],
[3.335, 0.096, 0.057],
[2.631, 0.172, 0.142],
[3.884, 0.084, 0.057],
[1.463, 0.072, 0.072],
[7.189, 0.312, 0.306],
[4.632, 0.174, 0.171],
[1.865, 0.245, 0.194],
[63.399, 2.148, 2.072],
[42.517, 1.863, 1.819],
[4.507, 0.249, 0.221],
[2.078, 0.138, 0.141],
[0.455, 0.102, 0.070],
[11.873, 0.138, 0.124],
[0.046, 0.012, 0.012],
[0.032, 0.009, 0.009],
[0.482, 0.060, 0.060],
[1.682, 0.045, 0.045],
[1.719, 0.097, 0.096],
[2.567, 0.203, 0.202],
[0.063, 0.045, 0.044],
[0.027, 0.010, 0.010],
[2.283, 0.434, 0.437],
[2.256, 0.520, 0.521],
[1.078, 0.136, 0.135],
[1.693, 0.162, 0.161],
[2.589, 0.464, 0.460],
[3.809, 0.623, 0.589],
[2.391, 0.562, 0.579],
[1.265, 0.575, 0.579],
[4.293, 1.441, 1.485],
[3.656, 0.792, 0.796],
[7.960, 3.260, 3.240],
[0.923, 0.030, 0.029],
[18.973, 0.584, 0.581],
[20.841, 0.600, 0.593],
[38.786, 1.403, 1.398],
[39.036, 0.702, 0.684],
[4.554, 0.194, 0.195],
[2.325, 0.169, 0.165],
[5.239, 0.200, 0.197],
[18.609, 0.522, 0.517],
[15.427, 1.150, 1.153],
[2.475, 2.443, 2.442],
[3.870, 0.524, 0.523],
[9.837, 0.757, 0.748],
[9.334, 4.308, 4.309],
[18.947, 2.232, 2.243],
[18.972, 2.260, 2.283],
[0.991, 0.930, 0.932],
[0.116, 0.116, 0.093],
[0.061, 0.075, 0.050],
[0.056, 0.062, 0.036],
[0.272, 0.303, 0.222],
[0.041, 0.043, 0.014],
[0.025, 0.041, 0.012],
[0.014, 0.030, 0.003]
]
}
]
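
All of the benchmark result files added in this part of the diff share the same layout: a one-element JSON array whose entry carries "system", "system_full", "version", "kind", "comments", and a "result" matrix with three timings (in seconds) per query. Below is a minimal sketch of how such a file could be loaded and sanity-checked, assuming Python 3 and only the standard library; the script name, its command-line interface, and the three-timings-per-row expectation are assumptions drawn solely from the entries shown above, not from any tooling in the repository.

import json
import sys

def check_benchmark_file(path: str) -> None:
    # Sketch only: load one benchmark result file and verify the shape seen
    # in the data above (a list of entries, each with a "result" matrix of
    # three numeric timings per query). The path is supplied by the caller.
    with open(path, encoding="utf-8") as f:
        entries = json.load(f)

    for entry in entries:
        # Fields present in every block above.
        for key in ("system", "system_full", "version", "kind", "comments", "result"):
            if key not in entry:
                raise ValueError(f"{path}: entry is missing field {key!r}")

        for i, runs in enumerate(entry["result"]):
            # Each query row in the data above carries three timings (seconds).
            if len(runs) != 3 or not all(isinstance(t, (int, float)) for t in runs):
                raise ValueError(f"{path}: query {i} does not have three numeric timings")

    print(f"{path}: {len(entries)} entries, OK")

if __name__ == "__main__":
    for p in sys.argv[1:]:
        check_benchmark_file(p)

Usage would be along the lines of: python check_benchmark_file.py 18.16.1.json (hypothetical file name), repeated for each of the files added above.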

Some files were not shown because too many files have changed in this diff.