Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-12 09:22:05 +00:00)

Commit f07a395bf1: Merge branch 'master' into ci-fuzzer-enable
@@ -3,8 +3,9 @@
 #include <base/defines.h>
 
 #include <fstream>
-#include <sstream>
+#include <string>
 
+namespace fs = std::filesystem;
 
 bool cgroupsV2Enabled()
 {
@@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
     {
         /// This file exists iff the host has cgroups v2 enabled.
         auto controllers_file = default_cgroups_mount / "cgroup.controllers";
-        if (!std::filesystem::exists(controllers_file))
+        if (!fs::exists(controllers_file))
             return false;
         return true;
     }
-    catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
+    catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
     {
         return false; /// not logging the exception as most callers fall back to cgroups v1
     }
@@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
     /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
     /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
     /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
-    std::string cgroup = cgroupV2OfProcess();
-    auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    fs::path cgroup_dir = cgroupV2PathOfProcess();
+    if (cgroup_dir.empty())
+        return false;
     std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
     if (!controllers_file.is_open())
         return false;
@@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
 #endif
 }
 
-std::string cgroupV2OfProcess()
+fs::path cgroupV2PathOfProcess()
 {
 #if defined(OS_LINUX)
     chassert(cgroupsV2Enabled());
@@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
     /// A simpler way to get the membership is:
     std::ifstream cgroup_name_file("/proc/self/cgroup");
     if (!cgroup_name_file.is_open())
-        return "";
+        return {};
     /// With cgroups v2, there will be a *single* line with prefix "0::/"
     /// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
     std::string cgroup;
     std::getline(cgroup_name_file, cgroup);
     static const std::string v2_prefix = "0::/";
     if (!cgroup.starts_with(v2_prefix))
-        return "";
+        return {};
     cgroup = cgroup.substr(v2_prefix.length());
-    return cgroup;
+    /// Note: The 'root' cgroup can have an empty cgroup name, this is valid
+    return default_cgroups_mount / cgroup;
 #else
-    return "";
+    return {};
 #endif
 }

@@ -1,7 +1,6 @@
 #pragma once
 
 #include <filesystem>
-#include <string>
 
 #if defined(OS_LINUX)
 /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
 /// Assumes that cgroupsV2Enabled() is enabled.
 bool cgroupsV2MemoryControllerEnabled();
 
-/// Which cgroup does the process belong to?
-/// Returns an empty string if the cgroup cannot be determined.
+/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
+/// Returns an empty path if the cgroup cannot be determined.
 /// Assumes that cgroupsV2Enabled() is enabled.
-std::string cgroupV2OfProcess();
+std::filesystem::path cgroupV2PathOfProcess();

@@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
     if (!cgroupsV2MemoryControllerEnabled())
         return {};
 
-    std::string cgroup = cgroupV2OfProcess();
-    auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
+    if (current_cgroup.empty())
+        return {};
 
     /// Open the bottom-most nested memory limit setting file. If there is no such file at the current
     /// level, try again at the parent level as memory settings are inherited.
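
The hunks above replace the string-returning cgroupV2OfProcess() with a path-returning cgroupV2PathOfProcess(), which folds the "empty name means root cgroup" case into ordinary path composition. As a quick reference, here is a self-contained sketch of the same /proc/self/cgroup parsing; it assumes the default /sys/fs/cgroup mount point and C++20 (for std::string::starts_with), and is illustrative rather than the library code itself:

```cpp
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Typical cgroups v2 mount point; the header above notes it can be mounted elsewhere (e.g. in containers).
static const fs::path default_cgroups_mount = "/sys/fs/cgroup";

// Returns the filesystem path of the cgroup the calling process belongs to,
// or an empty path if it cannot be determined.
fs::path cgroupV2PathOfSelf()
{
    std::ifstream cgroup_name_file("/proc/self/cgroup");
    if (!cgroup_name_file.is_open())
        return {};
    // With cgroups v2 there is a single line of the form "0::/<name>".
    std::string line;
    std::getline(cgroup_name_file, line);
    static const std::string v2_prefix = "0::/";
    if (!line.starts_with(v2_prefix))
        return {};
    // The root cgroup has an empty name, which is valid: the result is then the mount point itself.
    return default_cgroups_mount / line.substr(v2_prefix.size());
}

int main()
{
    fs::path path = cgroupV2PathOfSelf();
    std::cout << (path.empty() ? "not determined" : path.string()) << '\n';
}
```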

contrib/libunwind (vendored submodule)
@@ -1 +1 @@
-Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df
+Subproject commit 8f28e64d15819d2d096badd598c7d85bebddb1f2
@@ -4,9 +4,6 @@ set(LIBUNWIND_CXX_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/libunwind.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-EHABI.cpp"
     "${LIBUNWIND_SOURCE_DIR}/src/Unwind-seh.cpp")
-if (APPLE)
-    set(LIBUNWIND_CXX_SOURCES ${LIBUNWIND_CXX_SOURCES} "${LIBUNWIND_SOURCE_DIR}/src/Unwind_AppleExtras.cpp")
-endif ()
 
 set(LIBUNWIND_C_SOURCES
     "${LIBUNWIND_SOURCE_DIR}/src/UnwindLevel1.c"
@@ -32,6 +29,7 @@ set_target_properties(unwind PROPERTIES FOLDER "contrib/libunwind-cmake")
 
 target_include_directories(unwind SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBUNWIND_SOURCE_DIR}/include>)
 target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1)
+target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_REMEMBER_STACK_ALLOC=1)
 # NOTE: from this macros sizeof(unw_context_t)/sizeof(unw_cursor_t) is depends, so it should be set always
 target_compile_definitions(unwind PUBLIC -D_LIBUNWIND_IS_NATIVE_ONLY)
 

@@ -12,7 +12,7 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
 USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
 USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
 
-RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
+RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
 
 if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
     RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0

@@ -11,7 +11,7 @@ This is for the case when you have Linux machine and want to use it to build `cl
 
 The cross-build for RISC-V 64 is based on the [Build instructions](../development/build.md), follow them first.
 
-## Install Clang-16
+## Install Clang-18
 
 Follow the instructions from https://apt.llvm.org/ for your Ubuntu or Debian setup or do
 ```

@@ -96,3 +96,22 @@ Result:
 │ 1 │ [2] │ [[4,1]] │
 └───────────┴───────────┴───────────┘
 ```
+
+## Reading nested subcolumns from Array
+
+If nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of a subcolumn will be `Array` of the type of original subcolumn.
+
+**Example**
+
+```sql
+CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
+SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
+```
+
+```text
+┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
+│ [1,2]      │ Array(UInt32)          │ ['Hello','World']         │ Array(String)          │
+│ [3,4,5]    │ Array(UInt32)          │ ['This','is','subcolumn'] │ Array(String)          │
+└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
+```

@@ -4,6 +4,9 @@ if (USE_CLANG_TIDY)
     set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
 endif ()
 
+set(MAX_LINKER_MEMORY 3500)
+include(../cmake/limit_jobs.cmake)
+
 include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)
 
 # The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),

@@ -12,6 +12,7 @@
 
 #include <DataTypes/DataTypeLowCardinality.h>
 #include <DataTypes/DataTypesNumber.h>
+#include <DataTypes/DataTypeTuple.h>
 
 namespace DB
 {
@@ -616,6 +617,7 @@ private:
         bool is_any_nullable = false;
         Tuple args;
         args.reserve(equals_functions.size());
+        DataTypes tuple_element_types;
        /// first we create tuple from RHS of equals functions
        for (const auto & equals : equals_functions)
        {
@@ -628,16 +630,18 @@ private:
             if (const auto * rhs_literal = equals_arguments[1]->as<ConstantNode>())
             {
                 args.push_back(rhs_literal->getValue());
+                tuple_element_types.push_back(rhs_literal->getResultType());
             }
             else
             {
                 const auto * lhs_literal = equals_arguments[0]->as<ConstantNode>();
                 assert(lhs_literal);
                 args.push_back(lhs_literal->getValue());
+                tuple_element_types.push_back(lhs_literal->getResultType());
             }
         }
 
-        auto rhs_node = std::make_shared<ConstantNode>(std::move(args));
+        auto rhs_node = std::make_shared<ConstantNode>(std::move(args), std::make_shared<DataTypeTuple>(std::move(tuple_element_types)));
 
         auto in_function = std::make_shared<FunctionNode>("in");
 

@@ -3832,6 +3832,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
                     node->convertToNullable();
                     break;
                 }
+
+                /// Check parent scopes until find current query scope.
+                if (scope_ptr->scope_node->getNodeType() == QueryTreeNodeType::QUERY)
+                    break;
             }
         }
 

@@ -55,9 +55,9 @@ namespace
         S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
             settings.auth_settings.region,
             context->getRemoteHostFilter(),
-            static_cast<unsigned>(global_settings.s3_max_redirects),
-            static_cast<unsigned>(global_settings.s3_retry_attempts),
-            global_settings.enable_s3_requests_logging,
+            static_cast<unsigned>(local_settings.s3_max_redirects),
+            static_cast<unsigned>(local_settings.backup_restore_s3_retry_attempts),
+            local_settings.enable_s3_requests_logging,
             /* for_disk_s3 = */ false,
             request_settings.get_request_throttler,
             request_settings.put_request_throttler,

@@ -80,6 +80,7 @@
 #include <Common/config_version.h>
 #include "config.h"
 
+
 namespace fs = std::filesystem;
 using namespace std::literals;
 
@@ -2565,12 +2566,12 @@ void ClientBase::runInteractive()
             word_break_characters,
             highlight_callback);
 #else
+        (void)word_break_characters;
         LineReader lr(
             history_file,
             getClientConfiguration().has("multiline"),
             query_extenders,
-            query_delimiters,
-            word_break_characters);
+            query_delimiters);
 #endif
 
         static const std::initializer_list<std::pair<String, String>> backslash_aliases =

@@ -46,8 +46,7 @@ public:
         Patterns delimiters,
         std::istream & input_stream_ = std::cin,
         std::ostream & output_stream_ = std::cout,
-        int in_fd_ = STDIN_FILENO
-    );
+        int in_fd_ = STDIN_FILENO);
 
     virtual ~LineReader() = default;
 

@@ -362,6 +362,9 @@ ReplxxLineReader::ReplxxLineReader(
     rx.bind_key(Replxx::KEY::control('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_NEXT, code); });
     rx.bind_key(Replxx::KEY::control('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::HISTORY_PREVIOUS, code); });
 
+    /// We don't want the default, "suspend" behavior, it confuses people.
+    rx.bind_key_internal(replxx::Replxx::KEY::control('Z'), "insert_character");
+
     auto commit_action = [this](char32_t code)
     {
         /// If we allow multiline and there is already something in the input, start a newline.

@@ -49,6 +49,7 @@ void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopw
 AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
     : name(init.name)
     , priority(init.priority)
+    , max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
     , thread_pool(std::make_unique<ThreadPool>(
         init.metric_threads,
         init.metric_active_threads,
@@ -56,17 +57,16 @@ AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
         /* max_threads = */ std::numeric_limits<size_t>::max(), // Unlimited number of threads, we do worker management ourselves
         /* max_free_threads = */ 0, // We do not require free threads
         /* queue_size = */0)) // Unlimited queue to avoid blocking during worker spawning
-    , max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
 {}
 
 AsyncLoader::Pool::Pool(Pool&& o) noexcept
     : name(o.name)
     , priority(o.priority)
-    , thread_pool(std::move(o.thread_pool))
     , ready_queue(std::move(o.ready_queue))
     , max_threads(o.max_threads)
     , workers(o.workers)
     , suspended_workers(o.suspended_workers.load()) // All these constructors are needed because std::atomic is neither copy-constructible, nor move-constructible. We never move pools after init, so it is safe.
+    , thread_pool(std::move(o.thread_pool))
 {}
 
 void cancelOnDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel)

@@ -365,11 +365,11 @@ private:
     {
         const String name;
         const Priority priority;
-        std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
         std::map<UInt64, LoadJobPtr> ready_queue; // FIFO queue of jobs to be executed in this pool. Map is used for faster erasing. Key is `ready_seqno`
         size_t max_threads; // Max number of workers to be spawn
         size_t workers = 0; // Number of currently executing workers
         std::atomic<size_t> suspended_workers{0}; // Number of workers that are blocked by `wait()` call on a job executing in the same pool (for deadlock resolution)
+        std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
 
         explicit Pool(const PoolInitializer & init);
         Pool(Pool&& o) noexcept;
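
Both AsyncLoader hunks reorder the max_threads and thread_pool members so that the mem-initializer lists match declaration order. The rule that makes this worth doing: C++ initializes non-static data members in declaration order, regardless of how the initializer list is written, so an initializer that reads another member is only safe if that member is declared earlier. A minimal generic illustration (not ClickHouse code):

```cpp
#include <cstddef>
#include <iostream>

// C++ initializes non-static data members in *declaration* order, regardless of
// the order they appear in the constructor's mem-initializer list.
struct Pool
{
    std::size_t max_threads;    // declared before queue_capacity, so it is initialized first
    std::size_t queue_capacity; // its initializer may therefore read max_threads safely

    explicit Pool(std::size_t requested)
        : queue_capacity(max_threads * 2) // listed first, but runs SECOND (declaration order)
        , max_threads(requested)          // runs FIRST; compilers flag the mismatch with -Wreorder
    {
    }
    // If the two declarations above were swapped, queue_capacity would be computed
    // from an uninitialized max_threads: undefined behavior with no compile error.
};

int main()
{
    Pool pool(8);
    std::cout << pool.max_threads << ' ' << pool.queue_capacity << '\n'; // prints: 8 16
}
```

Keeping the declaration order and the initializer list in sync makes the declaration order the single documented source of truth.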

@@ -25,6 +25,7 @@
 #endif
 
 using namespace DB;
+namespace fs = std::filesystem;
 
 namespace DB
 {
@@ -69,7 +70,7 @@ uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & ke
 
 struct CgroupsV1Reader : ICgroupsReader
 {
-    explicit CgroupsV1Reader(const std::filesystem::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
+    explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
 
     uint64_t readMemoryUsage() override
     {
@@ -85,7 +86,7 @@ private:
 
 struct CgroupsV2Reader : ICgroupsReader
 {
-    explicit CgroupsV2Reader(const std::filesystem::path & stat_file_dir)
+    explicit CgroupsV2Reader(const fs::path & stat_file_dir)
         : current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
     {
     }
@@ -129,8 +130,9 @@ std::optional<std::string> getCgroupsV2Path()
     if (!cgroupsV2MemoryControllerEnabled())
         return {};
 
-    String cgroup = cgroupV2OfProcess();
-    auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+    fs::path current_cgroup = cgroupV2PathOfProcess();
+    if (current_cgroup.empty())
+        return {};
 
     /// Return the bottom-most nested current memory file. If there is no such file at the current
     /// level, try again at the parent level as memory settings are inherited.
@@ -138,7 +140,7 @@ std::optional<std::string> getCgroupsV2Path()
     {
         const auto current_path = current_cgroup / "memory.current";
         const auto stat_path = current_cgroup / "memory.stat";
-        if (std::filesystem::exists(current_path) && std::filesystem::exists(stat_path))
+        if (fs::exists(current_path) && fs::exists(stat_path))
             return {current_cgroup};
         current_cgroup = current_cgroup.parent_path();
     }
@@ -148,7 +150,7 @@ std::optional<std::string> getCgroupsV2Path()
 std::optional<std::string> getCgroupsV1Path()
 {
     auto path = default_cgroups_mount / "memory/memory.stat";
-    if (!std::filesystem::exists(path))
+    if (!fs::exists(path))
         return {};
     return {default_cgroups_mount / "memory"};
 }

@@ -37,12 +37,12 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
     /// cgroupsv2
     if (cgroupsV2Enabled())
     {
-        /// First, we identify the cgroup the process belongs
-        std::string cgroup = cgroupV2OfProcess();
-        if (cgroup.empty())
+        /// First, we identify the path of the cgroup the process belongs
+        std::filesystem::path cgroup_path = cgroupV2PathOfProcess();
+        if (cgroup_path.empty())
             return default_cpu_count;
 
-        auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
+        auto current_cgroup = cgroup_path;
 
         // Looking for cpu.max in directories from the current cgroup to the top level
         // It does not stop on the first time since the child could have a greater value than parent
@@ -62,7 +62,7 @@ uint32_t getCGroupLimitedCPUCores(unsigned default_cpu_count)
             }
             current_cgroup = current_cgroup.parent_path();
         }
-        current_cgroup = default_cgroups_mount / cgroup;
+        current_cgroup = cgroup_path;
         // Looking for cpuset.cpus.effective in directories from the current cgroup to the top level
         while (current_cgroup != default_cgroups_mount.parent_path())
         {
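
The loop above looks for cpu.max at each level from the process's cgroup up to the mount root. In cgroups v2 that file holds "<quota> <period>" (with "max" meaning unlimited), and the effective core count is quota divided by period, rounded up. A hedged standalone sketch of that computation follows; the starting directory ("user.slice") and the keep-scanning policy here are illustrative assumptions, not the exact ClickHouse logic:

```cpp
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// cgroup v2 "cpu.max" holds "<quota> <period>", e.g. "200000 100000" (2 cores)
// or "max 100000" (no limit). Returns 0 when no limit applies or the file is absent.
unsigned cpuCoresFromCpuMax(const fs::path & cgroup_dir)
{
    std::ifstream cpu_max_file(cgroup_dir / "cpu.max");
    if (!cpu_max_file.is_open())
        return 0;
    std::string quota;
    std::string period;
    cpu_max_file >> quota >> period;
    if (quota == "max" || period.empty())
        return 0;
    // Round up: a quota of 1.5 periods still needs 2 cores to be fully usable.
    unsigned long q = std::stoul(quota);
    unsigned long p = std::stoul(period);
    return p ? static_cast<unsigned>((q + p - 1) / p) : 0;
}

int main()
{
    // Walk from a cgroup towards the mount root, like the code above; parent
    // levels are checked too since a child may declare a larger value than its parent.
    const fs::path mount = "/sys/fs/cgroup";
    unsigned cores = 0;
    for (fs::path dir = mount / "user.slice"; dir != mount.parent_path(); dir = dir.parent_path())
        if (unsigned c = cpuCoresFromCpuMax(dir))
            cores = c;
    std::cout << (cores ? std::to_string(cores) : "unlimited") << '\n';
}
```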

@@ -80,7 +80,7 @@ inline ALWAYS_INLINE void * newImpl(std::size_t size, TAlign... align)
     throw std::bad_alloc{};
 }
 
-inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept
+inline ALWAYS_INLINE void * newNoExcept(std::size_t size) noexcept
 {
 #if USE_GWP_ASAN
     if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))
@@ -99,7 +99,7 @@ inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept
     return malloc(size);
 }
 
-inline ALWAYS_INLINE void * newNoExept(std::size_t size, std::align_val_t align) noexcept
+inline ALWAYS_INLINE void * newNoExcept(std::size_t size, std::align_val_t align) noexcept
 {
 #if USE_GWP_ASAN
     if (unlikely(GWPAsan::GuardedAlloc.shouldSample()))

@@ -87,7 +87,7 @@ void * operator new(std::size_t size, const std::nothrow_t &) noexcept
 {
     AllocationTrace trace;
     std::size_t actual_size = Memory::trackMemory(size, trace);
-    void * ptr = Memory::newNoExept(size);
+    void * ptr = Memory::newNoExcept(size);
     trace.onAlloc(ptr, actual_size);
     return ptr;
 }
@@ -96,7 +96,7 @@ void * operator new[](std::size_t size, const std::nothrow_t &) noexcept
 {
     AllocationTrace trace;
     std::size_t actual_size = Memory::trackMemory(size, trace);
-    void * ptr = Memory::newNoExept(size);
+    void * ptr = Memory::newNoExcept(size);
     trace.onAlloc(ptr, actual_size);
     return ptr;
 }
@@ -105,7 +105,7 @@ void * operator new(std::size_t size, std::align_val_t align, const std::nothrow
 {
     AllocationTrace trace;
     std::size_t actual_size = Memory::trackMemory(size, trace, align);
-    void * ptr = Memory::newNoExept(size, align);
+    void * ptr = Memory::newNoExcept(size, align);
     trace.onAlloc(ptr, actual_size);
     return ptr;
 }
@@ -114,7 +114,7 @@ void * operator new[](std::size_t size, std::align_val_t align, const std::nothr
 {
     AllocationTrace trace;
     std::size_t actual_size = Memory::trackMemory(size, trace, align);
-    void * ptr = Memory::newNoExept(size, align);
+    void * ptr = Memory::newNoExcept(size, align);
     trace.onAlloc(ptr, actual_size);
     return ptr;
 }

@@ -502,6 +502,7 @@ class IColumn;
     M(UInt64, backup_restore_keeper_value_max_size, 1048576, "Maximum size of data of a [Zoo]Keeper's node during backup", 0) \
     M(UInt64, backup_restore_batch_size_for_keeper_multiread, 10000, "Maximum size of batch for multiread request to [Zoo]Keeper during backup or restore", 0) \
     M(UInt64, backup_restore_batch_size_for_keeper_multi, 1000, "Maximum size of batch for multi request to [Zoo]Keeper during backup or restore", 0) \
+    M(UInt64, backup_restore_s3_retry_attempts, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore.", 0) \
     M(UInt64, max_backup_bandwidth, 0, "The maximum read speed in bytes per second for particular backup on server. Zero means unlimited.", 0) \
     \
     M(Bool, log_profile_events, true, "Log query performance statistics into the query_log, query_thread_log and query_views_log.", 0) \

@@ -77,6 +77,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
             {"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
             {"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
             {"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
+            {"backup_restore_s3_retry_attempts", 1000, 1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
             {"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
             {"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
         }},

@@ -162,7 +162,7 @@ public:
 class RetryStrategy : public Aws::Client::RetryStrategy
 {
 public:
-    explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 90000);
+    explicit RetryStrategy(uint32_t maxRetries_ = 10, uint32_t scaleFactor_ = 25, uint32_t maxDelayMs_ = 5000);
 
     /// NOLINTNEXTLINE(google-runtime-int)
     bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;
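
The hunk lowers the retry strategy's delay cap from 90000 ms to 5000 ms. AWS-SDK-style retry strategies compute the sleep before retry n as an exponential function of the attempt count, scaled by scaleFactor and clamped to maxDelayMs; the exact formula behind ClickHouse's override is not visible in this diff, so treat the sketch below as an assumption of the common shape, showing what the new cap changes:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Capped exponential backoff: delay(n) = min(scaleFactor * 2^n, maxDelayMs).
// With scaleFactor = 25 the uncapped series is 25, 50, 100, 200, ... ms.
uint32_t backoffDelayMs(long attemptedRetries, uint32_t scaleFactor = 25, uint32_t maxDelayMs = 5000)
{
    if (attemptedRetries >= 20) // 25 * 2^20 would overflow 32 bits; cap early
        return maxDelayMs;
    uint64_t delay = static_cast<uint64_t>(scaleFactor) << attemptedRetries;
    return static_cast<uint32_t>(std::min<uint64_t>(delay, maxDelayMs));
}

int main()
{
    for (long n = 0; n < 12; ++n)
        std::cout << "retry " << n << ": " << backoffDelayMs(n) << " ms\n";
    // With maxDelayMs = 90000 the delays kept doubling up toward 90 s per attempt;
    // with 5000 they plateau at 5 s from retry 8 onwards, bounding worst-case waits.
}
```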

@@ -125,7 +125,7 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
     if (isCrossOrComma(kind))
     {
         data->type = Type::CROSS;
-        sample_block_with_columns_to_add = right_sample_block;
+        sample_block_with_columns_to_add = materializeBlock(right_sample_block);
     }
     else if (table_join->getClauses().empty())
     {

@@ -226,10 +226,11 @@ void EmbeddedRocksDBBulkSink::consume(Chunk chunk_)
     if (chunks_to_write.empty())
         return;
 
+    size_t num_chunks = chunks_to_write.size();
     auto [serialized_key_column, serialized_value_column]
         = storage.ttl > 0 ? serializeChunks<true>(std::move(chunks_to_write)) : serializeChunks<false>(std::move(chunks_to_write));
     auto sst_file_path = getTemporarySSTFilePath();
-    LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows to SST file {}", serialized_key_column->size(), sst_file_path);
+    LOG_DEBUG(getLogger("EmbeddedRocksDBBulkSink"), "Writing {} rows from {} chunks to SST file {}", serialized_key_column->size(), num_chunks, sst_file_path);
     if (auto status = buildSSTFile(sst_file_path, *serialized_key_column, *serialized_value_column); !status.ok())
         throw Exception(ErrorCodes::ROCKSDB_ERROR, "RocksDB write error: {}", status.ToString());
 

@@ -420,7 +420,7 @@ String transformQueryForExternalDatabase(
         throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "No column names for query '{}' to external table '{}.{}'",
                         query_info.query_tree->formatASTForErrorMessage(), database, table);
 
-    auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree);
+    auto clone_query = getASTForExternalDatabaseFromQueryTree(query_info.query_tree, query_info.table_expression);
 
     return transformQueryForExternalDatabaseImpl(
         clone_query,

@@ -3,6 +3,7 @@
 #include <Storages/transformQueryForExternalDatabaseAnalyzer.h>
 
 #include <Parsers/ASTSelectWithUnionQuery.h>
+#include <Parsers/ASTSelectQuery.h>
 #include <Analyzer/InDepthQueryTreeVisitor.h>
 
 #include <Columns/ColumnConst.h>
@@ -10,7 +11,7 @@
 #include <Analyzer/QueryNode.h>
 #include <Analyzer/ConstantNode.h>
 #include <Analyzer/ConstantValue.h>
+#include <Analyzer/JoinNode.h>
 
 #include <DataTypes/DataTypesNumber.h>
 
@@ -20,6 +21,7 @@ namespace DB
 namespace ErrorCodes
 {
     extern const int UNSUPPORTED_METHOD;
+    extern const int LOGICAL_ERROR;
 }
 
 namespace
@@ -55,7 +57,7 @@ public:
 
 }
 
-ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree)
+ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression)
 {
     auto new_tree = query_tree->clone();
 
@@ -63,6 +65,20 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
     visitor.visit(new_tree);
     const auto * query_node = new_tree->as<QueryNode>();
 
+    const auto & join_tree = query_node->getJoinTree();
+    bool allow_where = true;
+    if (const auto * join_node = join_tree->as<JoinNode>())
+    {
+        if (join_node->getStrictness() != JoinStrictness::All)
+            allow_where = false;
+        else if (join_node->getKind() == JoinKind::Left)
+            allow_where = join_node->getLeftTableExpression()->isEqual(*table_expression);
+        else if (join_node->getKind() == JoinKind::Right)
+            allow_where = join_node->getRightTableExpression()->isEqual(*table_expression);
+        else
+            allow_where = (join_node->getKind() == JoinKind::Inner);
+    }
+
     auto query_node_ast = query_node->toAST({ .add_cast_for_constants = false, .fully_qualified_identifiers = false });
     const IAST * ast = query_node_ast.get();
 
@@ -76,7 +92,13 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
     if (union_ast->list_of_selects->children.size() != 1)
         throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST is not a single ASTSelectQuery, got {}", union_ast->list_of_selects->children.size());
 
-    return union_ast->list_of_selects->children.at(0);
+    ASTPtr select_query = union_ast->list_of_selects->children.at(0);
+    auto * select_query_typed = select_query->as<ASTSelectQuery>();
+    if (!select_query_typed)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ASTSelectQuery, got {}", select_query ? select_query->formatForErrorMessage() : "nullptr");
+    if (!allow_where)
+        select_query_typed->setExpression(ASTSelectQuery::Expression::WHERE, nullptr);
+    return select_query;
 }
 
 }
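
The logic added above decides whether the WHERE clause may be forwarded to the external database. The hazard it guards against: for an outer join, pre-filtering the non-preserved side turns "row matched but filtered out by WHERE" into "row not matched at all", so the row reappears NULL-extended; non-ALL strictness (ANY/SEMI/ANTI/ASOF) similarly picks representative rows that a pre-filter could change. A condensed restatement of the rule as a standalone function, with hypothetical enums mirroring the hunk's checks:

```cpp
#include <iostream>

enum class JoinKind { Inner, Left, Right, Full, Cross };
enum class JoinStrictness { All, Any, Semi, Anti, Asof };
enum class Side { Left, Right };

// The WHERE clause may be sent to the external table only when filtering that
// table early cannot change the join result: inner joins, or the preserved side
// of a LEFT/RIGHT join (whose rows all survive regardless of matches), and only
// with ALL strictness.
bool allowWherePushdown(JoinKind kind, JoinStrictness strictness, Side external_table_side)
{
    if (strictness != JoinStrictness::All)
        return false;
    if (kind == JoinKind::Inner)
        return true;
    if (kind == JoinKind::Left)
        return external_table_side == Side::Left;
    if (kind == JoinKind::Right)
        return external_table_side == Side::Right;
    return false; // FULL, CROSS, ...
}

int main()
{
    std::cout << allowWherePushdown(JoinKind::Left, JoinStrictness::All, Side::Right) << '\n'; // 0: unsafe
    std::cout << allowWherePushdown(JoinKind::Left, JoinStrictness::All, Side::Left) << '\n';  // 1: safe
}
```

The integration test added later in this commit (test_filter_pushdown) exercises exactly these kind/side combinations against PostgreSQL.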

@@ -6,6 +6,6 @@
 namespace DB
 {
 
-ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree);
+ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tree, const QueryTreeNodePtr & table_expression);
 
 }

@@ -58,14 +58,14 @@ def decompress_fast(archive_path: Path, result_path: Optional[Path] = None) -> N
             archive_path,
             result_path,
         )
-        program_part = "--use-compress-program='zstd --threads=0'"
+        program_part = "--use-compress-program='zstd --threads=0 -d'"
     elif PIGZ.exists():
         logging.info(
             "pigz found, will compress and decompress faster ('%s' -> '%s')",
             archive_path,
             result_path,
         )
-        program_part = "--use-compress-program='pigz'"
+        program_part = "--use-compress-program='pigz -d'"
     else:
         program_part = "-z"
         logging.info(

@@ -3386,13 +3386,13 @@ def parse_args():
     parser.add_argument(
         "--replace-replicated-with-shared",
         action="store_true",
-        default=os.environ.get("USE_META_IN_KEEPER_FOR_MERGE_TREE", False),
+        default=os.environ.get("REPLACE_RMT_WITH_SMT", False),
         help="Replace ReplicatedMergeTree engine with SharedMergeTree",
     )
     parser.add_argument(
         "--replace-non-replicated-with-shared",
        action="store_true",
-        default=False,
+        default=os.environ.get("REPLACE_MT_WITH_SMT", False),
         help="Replace ordinary MergeTree engine with SharedMergeTree",
     )
 

@@ -2,6 +2,7 @@
     <profiles>
         <default>
             <s3_retry_attempts>5</s3_retry_attempts>
+            <backup_restore_s3_retry_attempts>5</backup_restore_s3_retry_attempts>
         </default>
     </profiles>
     <users>

@@ -834,6 +834,60 @@ def test_literal_escaping(started_cluster):
     cursor.execute(f"DROP TABLE escaping")
 
 
+def test_filter_pushdown(started_cluster):
+    cursor = started_cluster.postgres_conn.cursor()
+    cursor.execute("CREATE SCHEMA test_filter_pushdown")
+    cursor.execute(
+        "CREATE TABLE test_filter_pushdown.test_table (id integer, value integer)"
+    )
+    cursor.execute(
+        "INSERT INTO test_filter_pushdown.test_table VALUES (1, 10), (1, 110), (2, 0), (3, 33), (4, 0)"
+    )
+
+    node1.query(
+        """
+        CREATE TABLE test_filter_pushdown_pg_table (id UInt32, value UInt32)
+        ENGINE PostgreSQL('postgres1:5432', 'postgres', 'test_table', 'postgres', 'mysecretpassword', 'test_filter_pushdown');
+    """
+    )
+
+    node1.query(
+        """
+        CREATE TABLE test_filter_pushdown_local_table (id UInt32, value UInt32) ENGINE Memory AS SELECT * FROM test_filter_pushdown_pg_table
+    """
+    )
+
+    node1.query(
+        "CREATE TABLE ch_table (id UInt32, pg_id UInt32) ENGINE MergeTree ORDER BY id"
+    )
+    node1.query("INSERT INTO ch_table VALUES (1, 1), (2, 2), (3, 1), (4, 2), (5, 999)")
+
+    def compare_results(query, **kwargs):
+        result1 = node1.query(
+            query.format(pg_table="test_filter_pushdown_pg_table", **kwargs)
+        )
+        result2 = node1.query(
+            query.format(pg_table="test_filter_pushdown_local_table", **kwargs)
+        )
+        assert result1 == result2
+
+    for kind in ["INNER", "LEFT", "RIGHT", "FULL", "ANY LEFT", "SEMI RIGHT"]:
+        for value in [0, 10]:
+            compare_results(
+                "SELECT * FROM ch_table {kind} JOIN {pg_table} as p ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
+                kind=kind,
+                value=value,
+            )
+
+            compare_results(
+                "SELECT * FROM {pg_table} as p {kind} JOIN ch_table ON ch_table.pg_id = p.id WHERE value = {value} ORDER BY ALL",
+                kind=kind,
+                value=value,
+            )
+
+    cursor.execute("DROP SCHEMA test_filter_pushdown CASCADE")
+
+
 if __name__ == "__main__":
     cluster.start()
     input("Cluster created, press any key to destroy...")

@@ -30,7 +30,7 @@ $CLICKHOUSE_CLIENT --max_block_size 1 --min_insert_block_size_rows 1 --min_inser
 
 for _ in {1..60}; do
     $CLICKHOUSE_CLIENT --query "SYSTEM FLUSH LOGS"
-    [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 100)) ]] && break;
+    [[ $($CLICKHOUSE_CLIENT --query "SELECT sum(toUInt32(extract(message, 'Removed (\d+) old log entries'))) FROM system.text_log WHERE event_date >= yesterday() AND logger_name LIKE '%' || '$CLICKHOUSE_DATABASE' || '%r1%(ReplicatedMergeTreeCleanupThread)%' AND message LIKE '%Removed % old log entries%'") -gt $((SCALE - 10)) ]] && break;
     sleep 1
 done
 

@@ -73,7 +73,7 @@ kill -TERM $PID_1 && kill -TERM $PID_2 && kill -TERM $PID_3 && kill -TERM $PID_4
 wait
 
 $CLICKHOUSE_CLIENT -q "SELECT '$CLICKHOUSE_DATABASE', 'threads finished'"
-wait_for_queries_to_finish
+wait_for_queries_to_finish 60
 
 $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table0"
 $CLICKHOUSE_CLIENT -q "SYSTEM SYNC REPLICA alter_table1"

@@ -1,102 +1,42 @@
 flat
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_flat ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_flat ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_flat ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third
 flat/custom
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_flat_custom ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third
 hashed
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_hashed ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_hashed ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_hashed ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third
 hashed/custom
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_hashed_custom ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third
 complex_key_hashed
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_complex_key_hashed ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third
 complex_key_hashed/custom
--- { echoOn }
-INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
 1 First
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
 1 First
 2 Second
-INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-SELECT key, value FROM dict_complex_key_hashed_custom ORDER BY key ASC;
 1 First
 2 SecondUpdated
 3 Third

@@ -35,7 +35,7 @@ for layout in "${layouts[@]}"; do
         echo "$layout"
     fi
 
-    $CLICKHOUSE_CLIENT -nm -q "
+    $CLICKHOUSE_CLIENT --multiquery "
         TRUNCATE TABLE table_for_update_field_dictionary;
 
         CREATE DICTIONARY $dictionary_name
@@ -49,24 +49,31 @@ for layout in "${layouts[@]}"; do
         LAYOUT($layout())
         LIFETIME(1);
 
-        -- { echoOn }
-        INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());
-        SELECT key, value FROM $dictionary_name ORDER BY key ASC;
+        INSERT INTO table_for_update_field_dictionary VALUES (1, 'First', now());"
 
-        INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());
-        SELECT sleepEachRow(1) FROM numbers(10) SETTINGS function_sleep_max_microseconds_per_block = 10000000 FORMAT Null;
-        SELECT key, value FROM $dictionary_name ORDER BY key ASC;
+    while true
+    do
+        $CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'First' && break;
+        sleep .1;
+    done
+
+    $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_update_field_dictionary VALUES (2, 'Second', now());"
 
-        INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now());
-        INSERT INTO table_for_update_field_dictionary VALUES (3, 'Third', now());
-        SELECT sleepEachRow(1) FROM numbers(20) SETTINGS function_sleep_max_microseconds_per_block = 20000000 FORMAT Null;
-        SELECT key, value FROM $dictionary_name ORDER BY key ASC;
+    while true
+    do
+        $CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'Second' && break;
+        sleep .1;
+    done
+
+    $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_update_field_dictionary VALUES (2, 'SecondUpdated', now()), (3, 'Third', now())"
 
-        -- { echoOff }
+    while true
+    do
+        $CLICKHOUSE_CLIENT --query "SELECT key, value FROM $dictionary_name ORDER BY key ASC" | grep -A10 -B10 'SecondUpdated' && break;
+        sleep .1;
+    done
 
-        DROP DICTIONARY $dictionary_name;
-    "
+    $CLICKHOUSE_CLIENT --query "DROP DICTIONARY $dictionary_name"
+
     done
 done
|
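The rework above swaps the fixed `SELECT sleepEachRow(...)` waits for polling: after each INSERT the script re-reads the dictionary until the expected value appears, retrying every 100 ms, so the test no longer races a hard-coded sleep against the dictionary's `update_field` refresh. Note that `grep -A10 -B10` both detects the marker and prints the surrounding rows, which is what ends up compared against the reference file. The same pattern as a reusable sketch (assumes the stateless-test harness sets `$CLICKHOUSE_CLIENT`; the `wait_for_rows` helper name is illustrative, not part of the patch):

    # Poll a query until its output contains an expected marker, printing
    # the matched rows so they become part of the test's recorded output.
    wait_for_rows()
    {
        local query="$1" marker="$2"
        while true
        do
            $CLICKHOUSE_CLIENT --query "$query" | grep -A10 -B10 "$marker" && break
            sleep 0.1
        done
    }

    wait_for_rows "SELECT key, value FROM $dictionary_name ORDER BY key ASC" 'SecondUpdated'

Like the loops in the patch, the sketch carries no explicit timeout; it relies on the test runner's global per-test timeout to kill a run in which the value never appears.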
@@ -264,3 +264,44 @@ SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
 9 \N 9
 \N 0 20
 \N 1 25
+CREATE TABLE test
+ENGINE = ReplacingMergeTree
+PRIMARY KEY id
+AS SELECT number AS id FROM numbers(100);
+SELECT id
+FROM test
+GROUP BY id
+WITH CUBE
+HAVING id IN (
+SELECT id
+FROM test
+)
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+SELECT id
+FROM test
+FINAL
+GROUP BY id
+WITH CUBE
+HAVING id IN (
+SELECT DISTINCT id
+FROM test
+FINAL
+)
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+SELECT id
+FROM test
+FINAL
+GROUP BY
+GROUPING SETS ((id))
+ORDER BY
+id IN (
+SELECT DISTINCT id
+FROM test
+FINAL
+LIMIT 4
+) ASC
+LIMIT 256 BY id
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;
@@ -83,3 +83,48 @@ GROUP BY
 )
 ORDER BY 1, tuple(val)
 SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10;
+
+CREATE TABLE test
+ENGINE = ReplacingMergeTree
+PRIMARY KEY id
+AS SELECT number AS id FROM numbers(100);
+
+SELECT id
+FROM test
+GROUP BY id
+WITH CUBE
+HAVING id IN (
+SELECT id
+FROM test
+)
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+
+SELECT id
+FROM test
+FINAL
+GROUP BY id
+WITH CUBE
+HAVING id IN (
+SELECT DISTINCT id
+FROM test
+FINAL
+)
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls = true;
+
+SELECT id
+FROM test
+FINAL
+GROUP BY
+GROUPING SETS ((id))
+ORDER BY
+id IN (
+SELECT DISTINCT id
+FROM test
+FINAL
+LIMIT 4
+) ASC
+LIMIT 256 BY id
+FORMAT `NUll`
+SETTINGS allow_experimental_analyzer = 1, group_by_use_nulls=true;
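These added queries are analyzer regression checks for `group_by_use_nulls` combined with `FINAL`, `WITH CUBE`, and `GROUPING SETS`: with the setting on, grouping keys become Nullable and the super-aggregate rows produced by CUBE report them as NULL instead of the type's default, and the queries verify this survives `HAVING`/`ORDER BY` subqueries and `LIMIT ... BY`. (`FORMAT \`NUll\`` is kept verbatim; the odd capitalization comes from the fuzzer-reduced original.) A minimal, hedged illustration of what the setting changes (the query below is illustrative, not from the patch; it assumes the harness-provided `$CLICKHOUSE_CLIENT`):

    # With group_by_use_nulls = 1 the CUBE total row reports the key as \N;
    # with it off, the same row would show the key type's default (0).
    $CLICKHOUSE_CLIENT --query "
        SELECT number % 2 AS k, count() AS c
        FROM numbers(4)
        GROUP BY k WITH CUBE
        ORDER BY k
        SETTINGS group_by_use_nulls = 1"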
@@ -1,3 +0,0 @@
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
-select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;
@@ -3,7 +3,7 @@
 1000
 1
 1000
-2
+1
 1000000
 1000
 0 999001
@@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"
 ${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE rocksdb_worm;"
 # Must set both max_threads and max_insert_threads to 2 to make sure there is only two sinks
 ${CLICKHOUSE_CLIENT} --query "INSERT INTO rocksdb_worm SELECT number, number+1 FROM numbers_mt(1000000) SETTINGS max_threads = 2, max_insert_threads = 2, max_block_size = 10000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, insert_deduplication_token = '', optimize_trivial_insert_select = 1;"
-${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be 2 because default bulk sink size is ~1M rows / SST file
+${CLICKHOUSE_CLIENT} --query "SELECT sum(value) IN (1, 2) FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens';" # should be not more than 2 because default bulk sink size is ~1M rows / SST file.
 ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM rocksdb_worm;"

 # Testing insert with duplicated keys
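The assertion change makes the metric check tolerant: instead of pinning `sum(value)` (the `no.file.opens` counter) to exactly 2 in the reference, the query now evaluates `sum(value) IN (1, 2)` server-side, so the reference only records the resulting boolean, which is why the reference hunk above flips `2` to `1`. RocksDB may legitimately write one or two SST files for ~1M rows depending on how the two sinks flush. The same idea as a hedged sketch (table and metric names come from the test above; the BETWEEN bound is illustrative):

    # Compare a range check's boolean result instead of an exact metric
    # value, so the .reference file stays stable across valid outcomes.
    ${CLICKHOUSE_CLIENT} --query "SELECT sum(value) BETWEEN 1 AND 2 FROM system.rocksdb WHERE database = currentDatabase() AND table = 'rocksdb_worm' AND name = 'no.file.opens'"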
@@ -7,12 +7,14 @@ SETTINGS max_bytes_to_merge_at_max_space_in_pool = 80000, exclude_deleted_rows_f
 INSERT INTO lwd_merge SELECT number FROM numbers(10000);
 INSERT INTO lwd_merge SELECT number FROM numbers(10000, 10000);

-OPTIMIZE TABLE lwd_merge;
+SET optimize_throw_if_noop = 1;
+
+OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
 SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;

 DELETE FROM lwd_merge WHERE id % 10 > 0;

-OPTIMIZE TABLE lwd_merge;
+OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
 SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1;

 ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1;
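The two edits work together: `SET optimize_throw_if_noop = 1` makes an OPTIMIZE that cannot assign a merge fail with `CANNOT_ASSIGN_OPTIMIZE` instead of silently doing nothing, and the `-- { serverError CANNOT_ASSIGN_OPTIMIZE }` annotation tells the test runner that this error is the expected outcome. The test thus asserts that no merge is scheduled while the small `max_bytes_to_merge_at_max_space_in_pool` cap forbids it, presumably until enabling `exclude_deleted_rows_for_part_size_in_merge` further down lets the deleted rows stop counting toward part size. The `{ serverError ... }` tag is interpreted by the .sql test runner; in a shell test the same expectation could be checked explicitly (hedged sketch, assuming the `lwd_merge` table from the test above):

    # With optimize_throw_if_noop = 1, an unassignable merge must fail;
    # grep makes the expected error code visible in the test output.
    ${CLICKHOUSE_CLIENT} --optimize_throw_if_noop 1 --query "OPTIMIZE TABLE lwd_merge" 2>&1 | grep -o 'CANNOT_ASSIGN_OPTIMIZE'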
@@ -0,0 +1,2 @@
+2020-01-01
+2020-01-02
@@ -0,0 +1,12 @@
+SET allow_experimental_analyzer=1;
+CREATE TABLE foo (i Date) ENGINE MergeTree ORDER BY i;
+INSERT INTO foo VALUES ('2020-01-01');
+INSERT INTO foo VALUES ('2020-01-02');
+
+SET optimize_min_equality_disjunction_chain_length = 3;
+SELECT *
+FROM foo
+WHERE (foo.i = parseDateTimeBestEffort('2020-01-01'))
+OR (foo.i = parseDateTimeBestEffort('2020-01-02'))
+OR (foo.i = parseDateTimeBestEffort('2020-01-03'))
+ORDER BY foo.i ASC
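This new test exercises `optimize_min_equality_disjunction_chain_length`: once a disjunction of equality comparisons on the same column reaches the threshold (3 here), the analyzer folds `x = a OR x = b OR x = c` into `x IN (a, b, c)`. The tricky part covered here is the type mismatch: `parseDateTimeBestEffort` returns DateTime while `foo.i` is Date, so the rewrite must reconcile the types rather than build an ill-typed IN; the two dates in the reference hunk above are the expected output. One hedged way to observe whether the fold happened (illustrative, not part of the patch; assumes the `foo` table above):

    # EXPLAIN QUERY TREE should show a single `in` function node instead of
    # a chain of OR'ed equality comparisons once the threshold is reached.
    ${CLICKHOUSE_CLIENT} --allow_experimental_analyzer 1 --optimize_min_equality_disjunction_chain_length 3 --query "EXPLAIN QUERY TREE SELECT * FROM foo WHERE (foo.i = parseDateTimeBestEffort('2020-01-01')) OR (foo.i = parseDateTimeBestEffort('2020-01-02')) OR (foo.i = parseDateTimeBestEffort('2020-01-03'))"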
@@ -0,0 +1,2 @@
+1 nan 1048575 2
+1 1 1 1 1
7 tests/queries/0_stateless/03205_column_type_check.sql Normal file
|
|||||||
|
SELECT * FROM (SELECT toUInt256(1)) AS t, (SELECT greatCircleAngle(toLowCardinality(toNullable(toUInt256(1048575))), 257, -9223372036854775808, 1048576), 1048575, materialize(2)) AS u;
|
||||||
|
|
||||||
|
|
||||||
|
SET join_algorithm='hash';
|
||||||
|
SET allow_experimental_join_condition=1;
|
||||||
|
SELECT * FROM ( SELECT 1 AS a, toLowCardinality(1), 1) AS t1 CROSS JOIN (SELECT toLowCardinality(1 AS a), 1 AS b) AS t2;
|
||||||
|
|
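The 03205 queries read like fuzzer-reduced reproducers: both mix `LowCardinality`, `Nullable`, and plain columns in positions where the declared and materialized column types must agree, so they guard the column type check itself rather than any particular result (the `1 nan 1048575 2` / `1 1 1 1 1` reference rows above are simply whatever the expressions evaluate to). A hedged helper for examining the wrapper types involved (illustrative only; assumes the harness-provided `${CLICKHOUSE_CLIENT}`):

    # Show the fully wrapped type that the first query builds up.
    ${CLICKHOUSE_CLIENT} --query "SELECT toTypeName(toLowCardinality(toNullable(toUInt256(1048575))))"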