mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-30 03:22:14 +00:00

commit 40ca34cc43
Merge branch 'master' into vdimir/fix_02434_cancel_insert_when_client_dies
@@ -22,7 +22,6 @@
 
 #### New Feature
 * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
-* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
 * Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
 * Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)).
 * Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
@@ -34,17 +34,13 @@ curl https://clickhouse.com/ | sh
 
 Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
 
-* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30
+* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29
 
 ## Upcoming Events
 
 Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
 
-* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
-* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
-* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
-* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
-* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
+* MORE COMING SOON!
 
 ## Recent Recordings
 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"
@@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat
 
 if (WITH_COVERAGE)
     message (STATUS "Enabled instrumentation for code coverage")
-    set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
+    set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
+    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
 endif()
 
 option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
contrib/libprotobuf-mutator (vendored submodule)
@@ -1 +1 @@
-Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
+Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d
@@ -103,8 +103,6 @@ Default: 2
 
 The policy on how to perform a scheduling for background merges and mutations. Possible values are: `round_robin` and `shortest_task_first`.
 
-## background_merges_mutations_scheduling_policy
-
 Algorithm used to select next merge or mutation to be executed by background thread pool. Policy may be changed at runtime without server restart.
 Could be applied from the `default` profile for backward compatibility.
 
@@ -5608,3 +5608,9 @@ Default value: `10000000`.
 Minimal size of block to compress in CROSS JOIN. Zero value means - disable this threshold. This block is compressed when any of the two thresholds (by rows or by bytes) are reached.
 
 Default value: `1GiB`.
+
+## disable_insertion_and_mutation
+
+Disable all insert and mutations (alter table update / alter table delete / alter table drop partition). Set to true, can make this node focus on reading queries.
+
+Default value: `false`.
@@ -1,2 +1,2 @@
 clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS})
-target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
+target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)
@@ -12,38 +12,36 @@
 
 #include <Interpreters/Context.h>
 
+#include <AggregateFunctions/IAggregateFunction.h>
 #include <AggregateFunctions/registerAggregateFunctions.h>
 
 #include <base/scope_guard.h>
 
+using namespace DB;
+
+ContextMutablePtr context;
+
+extern "C" int LLVMFuzzerInitialize(int *, char ***)
+{
+    if (context)
+        return true;
+
+    SharedContextHolder shared_context = Context::createShared();
+    context = Context::createGlobal(shared_context.get());
+    context->makeGlobalContext();
+
+    MainThreadStatus::getInstance();
+
+    registerAggregateFunctions();
+
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
     try
     {
-        using namespace DB;
-
-        static SharedContextHolder shared_context;
-        static ContextMutablePtr context;
-
-        auto initialize = [&]() mutable
-        {
-            if (context)
-                return true;
-
-            shared_context = Context::createShared();
-            context = Context::createGlobal(shared_context.get());
-            context->makeGlobalContext();
-            context->setApplicationType(Context::ApplicationType::LOCAL);
-
-            MainThreadStatus::getInstance();
-
-            registerAggregateFunctions();
-            return true;
-        };
-
-        static bool initialized = initialize();
-        (void) initialized;
-
         total_memory_tracker.resetCounters();
         total_memory_tracker.setHardLimit(1_GiB);
         CurrentThread::get().memory_tracker.resetCounters();
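The same refactoring repeats in the data type, format, and query fuzzers further down: one-time setup moves out of `LLVMFuzzerTestOneInput` (where it had to hide behind a function-local static) into `LLVMFuzzerInitialize`, which libFuzzer calls exactly once before the first input. A minimal self-contained sketch of the pattern; `ExpensiveState` is a hypothetical placeholder, not a ClickHouse type:

```cpp
#include <cstddef>
#include <cstdint>

struct ExpensiveState { /* hypothetical: registries, global context, ... */ };
static ExpensiveState * state = nullptr;

/// Called by libFuzzer once, before any input is processed.
extern "C" int LLVMFuzzerInitialize(int * /*argc*/, char *** /*argv*/)
{
    static ExpensiveState instance; // heavy setup happens once, here
    state = &instance;
    return 0;                       // non-zero aborts the fuzzing run
}

/// Called for every generated input; stays cheap and deterministic.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
    (void)data;
    (void)size; // consume the input using the pre-built state
    return 0;
}
```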
@@ -86,7 +86,10 @@ inline std::string_view toDescription(OvercommitResult result)
 
 bool shouldTrackAllocation(Float64 probability, void * ptr)
 {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
     return intHash64(uintptr_t(ptr)) < std::numeric_limits<uint64_t>::max() * probability;
+#pragma clang diagnostic pop
 }
 
 }
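The same pragma pair recurs in several hunks below. The warning it silences fires because `std::numeric_limits<uint64_t>::max()` has no exact `double` representation: a 53-bit mantissa cannot hold 2^64 - 1, so the constant silently rounds to 2^64 when the comparison promotes it. A small sketch of the effect, showing why the rounding is harmless for a sampling threshold:

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>

int main()
{
    // 2^64 - 1 rounds up to exactly 2^64 as a double - this implicit
    // conversion is what -Wimplicit-const-int-float-conversion flags.
    double max_as_double = std::numeric_limits<uint64_t>::max();
    std::printf("%.1f\n", max_as_double); // prints 18446744073709551616.0

    // A sampling threshold only needs to be approximately
    // probability * 2^64, so being off by one ULP does not matter.
    double probability = 0.25;
    auto threshold = static_cast<uint64_t>(max_as_double * probability);
    std::printf("%llu\n", static_cast<unsigned long long>(threshold));
    return 0;
}
```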
@@ -166,6 +166,7 @@ namespace DB
     M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
     M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \
     M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \
+    M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0)
 
 /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp
 
@@ -271,9 +271,12 @@ namespace
     if (d != 0.0 && !std::isnormal(d))
         throw Exception(
             ErrorCodes::CANNOT_PARSE_NUMBER, "A setting's value in seconds must be a normal floating point number or zero. Got {}", d);
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
     if (d * 1000000 > std::numeric_limits<Poco::Timespan::TimeDiff>::max() || d * 1000000 < std::numeric_limits<Poco::Timespan::TimeDiff>::min())
         throw Exception(
             ErrorCodes::BAD_ARGUMENTS, "Cannot convert seconds to microseconds: the setting's value in seconds is too big: {}", d);
+#pragma clang diagnostic pop
 
     return static_cast<Poco::Timespan::TimeDiff>(d * 1000000);
 }
@@ -1,2 +1,2 @@
 clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp)
-target_link_libraries (names_and_types_fuzzer PRIVATE dbms)
+target_link_libraries (names_and_types_fuzzer PRIVATE dbms clickhouse_functions)
@@ -1,2 +1,2 @@
 clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS})
-target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
+target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)
@@ -12,35 +12,30 @@
 
 #include <AggregateFunctions/registerAggregateFunctions.h>
 
+using namespace DB;
+
+ContextMutablePtr context;
+
+extern "C" int LLVMFuzzerInitialize(int *, char ***)
+{
+    if (context)
+        return true;
+
+    SharedContextHolder shared_context = Context::createShared();
+    context = Context::createGlobal(shared_context.get());
+    context->makeGlobalContext();
+
+    MainThreadStatus::getInstance();
+
+    registerAggregateFunctions();
+
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
     try
     {
-        using namespace DB;
-
-        static SharedContextHolder shared_context;
-        static ContextMutablePtr context;
-
-        auto initialize = [&]() mutable
-        {
-            if (context)
-                return true;
-
-            shared_context = Context::createShared();
-            context = Context::createGlobal(shared_context.get());
-            context->makeGlobalContext();
-            context->setApplicationType(Context::ApplicationType::LOCAL);
-
-            MainThreadStatus::getInstance();
-
-            registerAggregateFunctions();
-            return true;
-        };
-
-        static bool initialized = initialize();
-        (void) initialized;
-
         total_memory_tracker.resetCounters();
         total_memory_tracker.setHardLimit(1_GiB);
         CurrentThread::get().memory_tracker.resetCounters();
@@ -1,2 +1,2 @@
 clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS})
-target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
+target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)
@@ -20,37 +20,32 @@
 
 #include <AggregateFunctions/registerAggregateFunctions.h>
 
+using namespace DB;
+
+ContextMutablePtr context;
+
+extern "C" int LLVMFuzzerInitialize(int *, char ***)
+{
+    if (context)
+        return true;
+
+    SharedContextHolder shared_context = Context::createShared();
+    context = Context::createGlobal(shared_context.get());
+    context->makeGlobalContext();
+
+    MainThreadStatus::getInstance();
+
+    registerAggregateFunctions();
+    registerFormats();
+
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
     try
     {
-        using namespace DB;
-
-        static SharedContextHolder shared_context;
-        static ContextMutablePtr context;
-
-        auto initialize = [&]() mutable
-        {
-            if (context)
-                return true;
-
-            shared_context = Context::createShared();
-            context = Context::createGlobal(shared_context.get());
-            context->makeGlobalContext();
-            context->setApplicationType(Context::ApplicationType::LOCAL);
-
-            MainThreadStatus::getInstance();
-
-            registerAggregateFunctions();
-            registerFormats();
-
-            return true;
-        };
-
-        static bool initialized = initialize();
-        (void) initialized;
-
         total_memory_tracker.resetCounters();
         total_memory_tracker.setHardLimit(1_GiB);
         CurrentThread::get().memory_tracker.resetCounters();
@@ -2146,7 +2146,10 @@ struct Transformer
             if constexpr (std::is_same_v<Additions, DateTimeAccurateConvertStrategyAdditions>
                 || std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)
             {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
                 bool is_valid_input = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL;
+#pragma clang diagnostic pop
                 if (!is_valid_input)
                 {
                     if constexpr (std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)
@@ -217,7 +217,10 @@ private:
         }
 
         Float64 num_bytes_with_decimals = base * iter->second;
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
         if (num_bytes_with_decimals > std::numeric_limits<UInt64>::max())
+#pragma clang diagnostic pop
         {
             throw Exception(
                 ErrorCodes::BAD_ARGUMENTS,
@@ -3494,18 +3494,22 @@ DDLWorker & Context::getDDLWorker() const
     if (shared->ddl_worker_startup_task)
         waitLoad(shared->ddl_worker_startup_task); // Just wait and do not prioritize, because it depends on all load and startup tasks
 
-    SharedLockGuard lock(shared->mutex);
-    if (!shared->ddl_worker)
     {
-        if (!hasZooKeeper())
-            throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config");
-
-        if (!hasDistributedDDL())
-            throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config");
-
-        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized");
+        /// Only acquire the lock for reading ddl_worker field.
+        /// hasZooKeeper() and hasDistributedDDL() acquire the same lock as well and double acquisition of the lock in shared mode can lead
+        /// to a deadlock if an exclusive lock attempt is made in the meantime by another thread.
+        SharedLockGuard lock(shared->mutex);
+        if (shared->ddl_worker)
+            return *shared->ddl_worker;
     }
-    return *shared->ddl_worker;
+
+    if (!hasZooKeeper())
+        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config");
+
+    if (!hasDistributedDDL())
+        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config");
+
+    throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized");
 }
 
 zkutil::ZooKeeperPtr Context::getZooKeeper() const
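The comments added here describe a classic shared-mutex hazard: a thread that already holds the lock in shared mode re-enters it in shared mode (via `hasZooKeeper()` / `hasDistributedDDL()`), and if another thread queues an exclusive-lock request in between, a writer-preferring implementation parks the nested shared acquisition behind the writer, which in turn waits for the first shared hold. A simplified sketch of the shape of the bug and of the fix, not the actual `Context` code:

```cpp
#include <shared_mutex>

std::shared_mutex mutex;

bool has_zookeeper()
{
    std::shared_lock lock(mutex); // nested shared acquisition
    return true;
}

void deadlock_prone()
{
    std::shared_lock lock(mutex); // outer shared acquisition
    // If a writer requests a unique_lock right now, the nested
    // shared_lock in has_zookeeper() may block behind it forever.
    has_zookeeper();
}

void fixed()
{
    {
        std::shared_lock lock(mutex); // scope the lock to the field read
    }
    has_zookeeper(); // reacquired only after the outer lock is released
}
```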
@@ -1407,7 +1407,10 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid)
     });
 
     /// TSA doesn't support unique_lock
-    if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid))
+    const bool has_table = TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid);
+    LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), has_table ? "table still exists" : "table dropped successfully");
+
+    if (has_table)
         throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, "
             "will finish after restart", uuid);
 }
@@ -5,6 +5,7 @@
 #include <Access/Common/AccessRightsElement.h>
 #include <Common/typeid_cast.h>
 #include <Core/Settings.h>
+#include <Core/ServerSettings.h>
 #include <Databases/DatabaseFactory.h>
 #include <Databases/DatabaseReplicated.h>
 #include <Databases/IDatabase.h>
@@ -47,6 +48,7 @@ namespace ErrorCodes
     extern const int BAD_ARGUMENTS;
     extern const int UNKNOWN_TABLE;
     extern const int UNKNOWN_DATABASE;
+    extern const int QUERY_IS_PROHIBITED;
 }
 
 
@@ -191,6 +193,12 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
             "to execute ALTERs of different types (replicated and non replicated) in single query");
     }
 
+    if (mutation_commands.hasNonEmptyMutationCommands() || !partition_commands.empty())
+    {
+        if (getContext()->getServerSettings().disable_insertion_and_mutation)
+            throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Mutations are prohibited");
+    }
+
     if (!alter_commands.empty())
     {
         auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout);
@@ -3,6 +3,7 @@
 
 #include <Access/ContextAccess.h>
 #include <Core/Settings.h>
+#include <Core/ServerSettings.h>
 #include <Databases/DatabaseReplicated.h>
 #include <Databases/IDatabase.h>
 #include <Interpreters/Context.h>
@@ -27,6 +28,7 @@ namespace ErrorCodes
     extern const int SUPPORT_IS_DISABLED;
     extern const int BAD_ARGUMENTS;
     extern const int NOT_IMPLEMENTED;
+    extern const int QUERY_IS_PROHIBITED;
 }
 
 
@@ -51,6 +53,9 @@ BlockIO InterpreterDeleteQuery::execute()
     if (table->isStaticStorage())
         throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");
 
+    if (getContext()->getGlobalContext()->getServerSettings().disable_insertion_and_mutation)
+        throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Delete queries are prohibited");
+
     DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name);
     if (database->shouldReplicateQuery(getContext(), query_ptr))
     {
@@ -6,6 +6,7 @@
 #include <AggregateFunctions/AggregateFunctionFactory.h>
 #include <Columns/ColumnNullable.h>
 #include <Core/Settings.h>
+#include <Core/ServerSettings.h>
 #include <Processors/Transforms/buildPushingToViewsChain.h>
 #include <DataTypes/DataTypeNullable.h>
 #include <Interpreters/DatabaseCatalog.h>
@@ -60,6 +61,7 @@ namespace ErrorCodes
     extern const int NO_SUCH_COLUMN_IN_TABLE;
     extern const int ILLEGAL_COLUMN;
     extern const int DUPLICATE_COLUMN;
+    extern const int QUERY_IS_PROHIBITED;
 }
 
 InterpreterInsertQuery::InterpreterInsertQuery(
@@ -732,6 +734,9 @@ BlockIO InterpreterInsertQuery::execute()
     const Settings & settings = getContext()->getSettingsRef();
     auto & query = query_ptr->as<ASTInsertQuery &>();
 
+    if (getContext()->getServerSettings().disable_insertion_and_mutation
+        && query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
+        throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited");
+
     StoragePtr table = getTable(query);
     checkStorageSupportsTransactionsIfNeeded(table, getContext());
@@ -14,41 +14,37 @@
 
 using namespace DB;
 
+ContextMutablePtr context;
+
+extern "C" int LLVMFuzzerInitialize(int *, char ***)
+{
+    if (context)
+        return true;
+
+    SharedContextHolder shared_context = Context::createShared();
+    context = Context::createGlobal(shared_context.get());
+    context->makeGlobalContext();
+
+    registerInterpreters();
+    registerFunctions();
+    registerAggregateFunctions();
+    registerTableFunctions();
+    registerDatabases();
+    registerStorages();
+    registerDictionaries();
+    registerDisks(/* global_skip_access_check= */ true);
+    registerFormats();
+
+    return 0;
+}
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
     try
     {
         std::string input = std::string(reinterpret_cast<const char*>(data), size);
 
-        static SharedContextHolder shared_context;
-        static ContextMutablePtr context;
-
-        auto initialize = [&]() mutable
-        {
-            if (context)
-                return true;
-
-            shared_context = Context::createShared();
-            context = Context::createGlobal(shared_context.get());
-            context->makeGlobalContext();
-            context->setApplicationType(Context::ApplicationType::LOCAL);
-
-            registerInterpreters();
-            registerFunctions();
-            registerAggregateFunctions();
-            registerTableFunctions();
-            registerDatabases();
-            registerStorages();
-            registerDictionaries();
-            registerDisks(/* global_skip_access_check= */ true);
-            registerFormats();
-
-            return true;
-        };
-
-        static bool initialized = initialize();
-        (void) initialized;
-
         auto io = DB::executeQuery(input, context, QueryFlags{ .internal = true }, QueryProcessingStage::Complete).second;
 
         PullingPipelineExecutor executor(io.pipeline);
@@ -27,7 +27,8 @@ DEFINE_BINARY_PROTO_FUZZER(const Sentence& main)
     DB::ParserQueryWithOutput parser(input.data() + input.size());
     try
     {
-        DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
+        DB::ASTPtr ast
+            = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
 
         DB::WriteBufferFromOStream out(std::cerr, 4096);
         DB::formatAST(*ast, out);
@@ -14,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
     std::string input = std::string(reinterpret_cast<const char*>(data), size);
 
     DB::ParserCreateQuery parser;
-    DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000);
+    DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);
 
     const UInt64 max_ast_depth = 1000;
     ast->checkDepth(max_ast_depth);
@@ -5557,12 +5557,16 @@ public:
         auto it = temp_part_dirs.find(part_name);
         if (it == temp_part_dirs.end())
        {
-            auto temp_part_dir = std::make_shared<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
+            auto temp_dir_deleter = std::make_unique<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
+            auto temp_part_dir = fs::path{temp_dir_deleter->getRelativePath()}.filename();
             /// Attaching parts will rename them so it's expected for a temporary part directory not to exist anymore in the end.
-            temp_part_dir->setShowWarningIfRemoved(false);
-            it = temp_part_dirs.emplace(part_name, temp_part_dir).first;
+            temp_dir_deleter->setShowWarningIfRemoved(false);
+            /// The following holder is needed to prevent clearOldTemporaryDirectories() from clearing `temp_part_dir` before we attach the part.
+            auto temp_dir_holder = storage->getTemporaryPartDirectoryHolder(temp_part_dir);
+            it = temp_part_dirs.emplace(part_name,
+                std::make_pair(std::move(temp_dir_deleter), std::move(temp_dir_holder))).first;
         }
-        return it->second->getRelativePath();
+        return it->second.first->getRelativePath();
     }
 
 private:
@@ -5588,7 +5592,7 @@ private:
     size_t num_parts = 0;
     size_t num_broken_parts = 0;
     MutableDataPartsVector parts;
-    std::map<String /* part_name*/, std::shared_ptr<TemporaryFileOnDisk>> temp_part_dirs;
+    std::map<String /* part_name*/, std::pair<std::unique_ptr<TemporaryFileOnDisk>, scope_guard>> temp_part_dirs;
     mutable std::mutex mutex;
 };
 
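The map now ties two lifetimes together per part: the object whose destructor deletes the temporary directory, and a guard that keeps the background cleaner away from that directory until the entry is erased. A minimal sketch of the ownership pattern with hypothetical stand-in types (not the real `TemporaryFileOnDisk` / `scope_guard`):

```cpp
#include <map>
#include <memory>
#include <string>
#include <utility>

struct TempDirDeleter
{
    ~TempDirDeleter() { /* remove the temporary directory from disk */ }
};

struct CleanerHoldGuard
{
    ~CleanerHoldGuard() { /* unregister the directory from the background cleaner */ }
};

int main()
{
    // Erasing an entry destroys the pair members in reverse order:
    // the hold on the cleaner is dropped first, then the directory
    // deleter runs - exactly the teardown order the restore needs.
    std::map<std::string, std::pair<std::unique_ptr<TempDirDeleter>, CleanerHoldGuard>> temp_part_dirs;
    temp_part_dirs.emplace("part_0_0_0", std::make_pair(std::make_unique<TempDirDeleter>(), CleanerHoldGuard{}));
    return 0;
}
```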
@@ -43,6 +43,7 @@
 #include <Common/ZooKeeper/Types.h>
 #include <Common/ZooKeeper/ZooKeeper.h>
 #include <Common/ZooKeeper/ZooKeeperConstants.h>
+#include <Common/ZooKeeper/ZooKeeperRetries.h>
 
 #include <Backups/BackupEntriesCollector.h>
 #include <Backups/IBackupCoordination.h>
@@ -78,6 +79,7 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
     extern const int LIMIT_EXCEEDED;
     extern const int CANNOT_RESTORE_TABLE;
+    extern const int INVALID_STATE;
 }
 
 namespace
@@ -120,7 +122,7 @@ public:
         : SinkToStorage(header), storage(storage_), context(std::move(context_))
     {
         auto primary_key = storage.getPrimaryKey();
-        assert(primary_key.size() == 1);
+        chassert(primary_key.size() == 1);
         primary_key_pos = getHeader().getPositionByName(primary_key[0]);
     }
 
@@ -171,81 +173,94 @@ public:
     template <bool for_update>
     void finalize(bool strict)
     {
-        auto zookeeper = storage.getClient();
-
-        auto keys_limit = storage.keysLimit();
-
-        size_t current_keys_num = 0;
-        size_t new_keys_num = 0;
-
-        // We use keys limit as a soft limit so we ignore some cases when it can be still exceeded
-        // (e.g if parallel insert queries are being run)
-        if (keys_limit != 0)
-        {
-            Coordination::Stat data_stat;
-            zookeeper->get(storage.dataPath(), &data_stat);
-            current_keys_num = data_stat.numChildren;
-        }
-
-        std::vector<std::string> key_paths;
-        key_paths.reserve(new_values.size());
-        for (const auto & [key, _] : new_values)
-            key_paths.push_back(storage.fullPathForKey(key));
-
-        zkutil::ZooKeeper::MultiExistsResponse results;
-
-        if constexpr (!for_update)
-        {
-            if (!strict)
-                results = zookeeper->exists(key_paths);
-        }
-
-        Coordination::Requests requests;
-        requests.reserve(key_paths.size());
-        for (size_t i = 0; i < key_paths.size(); ++i)
-        {
-            auto key = fs::path(key_paths[i]).filename();
-
-            if constexpr (for_update)
-            {
-                int32_t version = -1;
-                if (strict)
-                    version = versions.at(key);
-
-                requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], version));
-            }
-            else
-            {
-                if (!strict && results[i].error == Coordination::Error::ZOK)
-                {
-                    requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1));
-                }
-                else
-                {
-                    requests.push_back(zkutil::makeCreateRequest(key_paths[i], new_values[key], zkutil::CreateMode::Persistent));
-                    ++new_keys_num;
-                }
-            }
-        }
-
-        if (new_keys_num != 0)
-        {
-            auto will_be = current_keys_num + new_keys_num;
-            if (keys_limit != 0 && will_be > keys_limit)
-                throw Exception(
-                    ErrorCodes::LIMIT_EXCEEDED,
-                    "Limit would be exceeded by inserting {} new key(s). Limit is {}, while the number of keys would be {}",
-                    new_keys_num,
-                    keys_limit,
-                    will_be);
-        }
-
-        zookeeper->multi(requests, /* check_session_valid */ true);
+        const auto & settings = context->getSettingsRef();
+
+        ZooKeeperRetriesControl zk_retry{
+            getName(),
+            getLogger(getName()),
+            ZooKeeperRetriesInfo{
+                settings.insert_keeper_max_retries,
+                settings.insert_keeper_retry_initial_backoff_ms,
+                settings.insert_keeper_retry_max_backoff_ms},
+            context->getProcessListElement()};
+
+        zk_retry.retryLoop([&]()
+        {
+            auto zookeeper = storage.getClient();
+            auto keys_limit = storage.keysLimit();
+
+            size_t current_keys_num = 0;
+            size_t new_keys_num = 0;
+
+            // We use keys limit as a soft limit so we ignore some cases when it can be still exceeded
+            // (e.g if parallel insert queries are being run)
+            if (keys_limit != 0)
+            {
+                Coordination::Stat data_stat;
+                zookeeper->get(storage.dataPath(), &data_stat);
+                current_keys_num = data_stat.numChildren;
+            }
+
+            std::vector<std::string> key_paths;
+            key_paths.reserve(new_values.size());
+            for (const auto & [key, _] : new_values)
+                key_paths.push_back(storage.fullPathForKey(key));
+
+            zkutil::ZooKeeper::MultiExistsResponse results;
+
+            if constexpr (!for_update)
+            {
+                if (!strict)
+                    results = zookeeper->exists(key_paths);
+            }
+
+            Coordination::Requests requests;
+            requests.reserve(key_paths.size());
+            for (size_t i = 0; i < key_paths.size(); ++i)
+            {
+                auto key = fs::path(key_paths[i]).filename();
+
+                if constexpr (for_update)
+                {
+                    int32_t version = -1;
+                    if (strict)
+                        version = versions.at(key);
+
+                    requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], version));
+                }
+                else
+                {
+                    if (!strict && results[i].error == Coordination::Error::ZOK)
+                    {
+                        requests.push_back(zkutil::makeSetRequest(key_paths[i], new_values[key], -1));
+                    }
+                    else
+                    {
+                        requests.push_back(zkutil::makeCreateRequest(key_paths[i], new_values[key], zkutil::CreateMode::Persistent));
+                        ++new_keys_num;
+                    }
+                }
+            }
+
+            if (new_keys_num != 0)
+            {
+                auto will_be = current_keys_num + new_keys_num;
+                if (keys_limit != 0 && will_be > keys_limit)
+                    throw Exception(
+                        ErrorCodes::LIMIT_EXCEEDED,
+                        "Limit would be exceeded by inserting {} new key(s). Limit is {}, while the number of keys would be {}",
+                        new_keys_num,
+                        keys_limit,
+                        will_be);
+            }
+
+            zookeeper->multi(requests, /* check_session_valid */ true);
+        });
     }
 };
 
 template <typename KeyContainer>
-class StorageKeeperMapSource : public ISource
+class StorageKeeperMapSource : public ISource, WithContext
 {
     const StorageKeeperMap & storage;
     size_t max_block_size;
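The remaining StorageKeeperMap hunks apply the same wrapper to the constructor, `read()` and `truncate()`: each batch of Keeper calls becomes a lambda handed to `ZooKeeperRetriesControl::retryLoop`, which re-runs it with backoff when the session hits a retriable Keeper error. A stripped-down sketch of that control flow, using a hypothetical `RetryControl` rather than the real class:

```cpp
#include <chrono>
#include <thread>

/// Hypothetical, simplified version of the retry control used in this diff.
struct RetryControl
{
    int max_retries = 20;
    std::chrono::milliseconds backoff{100};
    int attempt = 0;

    template <typename F>
    void retryLoop(F && body)
    {
        for (attempt = 0;; ++attempt)
        {
            try
            {
                body(); // must fetch a fresh client inside: the previous
                return; // session may have expired between attempts
            }
            catch (...) // the real class retries only transient Keeper errors
            {
                if (attempt >= max_retries)
                    throw;
                std::this_thread::sleep_for(backoff);
            }
        }
    }

    bool isRetry() const { return attempt > 0; } // lets the body detect re-runs
};
```

Because a retried lambda may have partially succeeded before the error, the constructor hunk below uses `zk_retry.isRetry()` to treat `ZNODEEXISTS` on a retry as success: the first attempt may have created the node before its acknowledgement was lost.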
@@ -276,8 +291,15 @@ public:
         KeyContainerPtr container_,
         KeyContainerIter begin_,
         KeyContainerIter end_,
-        bool with_version_column_)
-        : ISource(getHeader(header, with_version_column_)), storage(storage_), max_block_size(max_block_size_), container(std::move(container_)), it(begin_), end(end_)
+        bool with_version_column_,
+        ContextPtr context_)
+        : ISource(getHeader(header, with_version_column_))
+        , WithContext(std::move(context_))
+        , storage(storage_)
+        , max_block_size(max_block_size_)
+        , container(std::move(container_))
+        , it(begin_)
+        , end(end_)
         , with_version_column(with_version_column_)
     {
     }
@@ -302,12 +324,12 @@ public:
             for (auto & raw_key : raw_keys)
                 raw_key = base64Encode(raw_key, /* url_encoding */ true);
 
-            return storage.getBySerializedKeys(raw_keys, nullptr, with_version_column);
+            return storage.getBySerializedKeys(raw_keys, nullptr, with_version_column, getContext());
         }
         else
         {
             size_t elem_num = std::min(max_block_size, static_cast<size_t>(end - it));
-            auto chunk = storage.getBySerializedKeys(std::span{it, it + elem_num}, nullptr, with_version_column);
+            auto chunk = storage.getBySerializedKeys(std::span{it, it + elem_num}, nullptr, with_version_column, getContext());
             it += elem_num;
             return chunk;
         }
@@ -386,104 +408,192 @@ StorageKeeperMap::StorageKeeperMap(
 
     if (attach)
     {
-        checkTable<false>();
+        checkTable<false>(context_);
         return;
     }
 
-    auto client = getClient();
-
-    if (zk_root_path != "/" && !client->exists(zk_root_path))
-    {
-        LOG_TRACE(log, "Creating root path {}", zk_root_path);
-        client->createAncestors(zk_root_path);
-        client->createIfNotExists(zk_root_path, "");
-    }
-
+    const auto & settings = context_->getSettingsRef();
+    ZooKeeperRetriesControl zk_retry{
+        getName(),
+        getLogger(getName()),
+        ZooKeeperRetriesInfo{settings.keeper_max_retries, settings.keeper_retry_initial_backoff_ms, settings.keeper_retry_max_backoff_ms},
+        context_->getProcessListElement()};
+
+    zk_retry.retryLoop(
+        [&]
+        {
+            auto client = getClient();
+
+            if (zk_root_path != "/" && !client->exists(zk_root_path))
+            {
+                LOG_TRACE(log, "Creating root path {}", zk_root_path);
+                client->createAncestors(zk_root_path);
+                client->createIfNotExists(zk_root_path, "");
+            }
+        });
+
+    std::shared_ptr<zkutil::EphemeralNodeHolder> metadata_drop_lock;
+    int32_t drop_lock_version = -1;
     for (size_t i = 0; i < 1000; ++i)
     {
-        std::string stored_metadata_string;
-        auto exists = client->tryGet(zk_metadata_path, stored_metadata_string);
-
-        if (exists)
-        {
-            // this requires same name for columns
-            // maybe we can do a smarter comparison for columns and primary key expression
-            if (stored_metadata_string != metadata_string)
-                throw Exception(
-                    ErrorCodes::BAD_ARGUMENTS,
-                    "Path {} is already used but the stored table definition doesn't match. Stored metadata: {}",
-                    zk_root_path,
-                    stored_metadata_string);
-
-            auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent);
-
-            /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica
-            /// We still don't know if the table matches the expected metadata so table_is_valid is not changed
-            /// It will be checked lazily on the first operation
-            if (code == Coordination::Error::ZOK)
-                return;
-
-            if (code != Coordination::Error::ZNONODE)
-                throw zkutil::KeeperException(code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path);
-
-            /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely
-        }
-
-        if (client->exists(zk_dropped_path))
-        {
-            LOG_INFO(log, "Removing leftover nodes");
-            auto code = client->tryCreate(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral);
-
-            if (code == Coordination::Error::ZNONODE)
-            {
-                LOG_INFO(log, "Someone else removed leftover nodes");
-            }
-            else if (code == Coordination::Error::ZNODEEXISTS)
-            {
-                LOG_INFO(log, "Someone else is removing leftover nodes");
-                continue;
-            }
-            else if (code != Coordination::Error::ZOK)
-            {
-                throw Coordination::Exception::fromPath(code, zk_dropped_lock_path);
-            }
-            else
-            {
-                auto metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client);
-                if (!dropTable(client, metadata_drop_lock))
-                    continue;
-            }
-        }
-
-        Coordination::Requests create_requests
-        {
-            zkutil::makeCreateRequest(zk_metadata_path, metadata_string, zkutil::CreateMode::Persistent),
-            zkutil::makeCreateRequest(zk_data_path, metadata_string, zkutil::CreateMode::Persistent),
-            zkutil::makeCreateRequest(zk_tables_path, "", zkutil::CreateMode::Persistent),
-            zkutil::makeCreateRequest(zk_table_path, "", zkutil::CreateMode::Persistent),
-        };
-
-        Coordination::Responses create_responses;
-        auto code = client->tryMulti(create_requests, create_responses);
-        if (code == Coordination::Error::ZNODEEXISTS)
-        {
-            LOG_INFO(log, "It looks like a table on path {} was created by another server at the same moment, will retry", zk_root_path);
-            continue;
-        }
-        else if (code != Coordination::Error::ZOK)
-        {
-            zkutil::KeeperMultiException::check(code, create_requests, create_responses);
-        }
-
-        table_is_valid = true;
-        /// we are the first table created for the specified Keeper path, i.e. we are the first replica
-        return;
+        bool success = false;
+        zk_retry.retryLoop(
+            [&]
+            {
+                auto client = getClient();
+                std::string stored_metadata_string;
+                auto exists = client->tryGet(zk_metadata_path, stored_metadata_string);
+
+                if (exists)
+                {
+                    // this requires same name for columns
+                    // maybe we can do a smarter comparison for columns and primary key expression
+                    if (stored_metadata_string != metadata_string)
+                        throw Exception(
+                            ErrorCodes::BAD_ARGUMENTS,
+                            "Path {} is already used but the stored table definition doesn't match. Stored metadata: {}",
+                            zk_root_path,
+                            stored_metadata_string);
+
+                    auto code = client->tryCreate(zk_table_path, "", zkutil::CreateMode::Persistent);
+
+                    /// A table on the same Keeper path already exists, we just appended our table id to subscribe as a new replica
+                    /// We still don't know if the table matches the expected metadata so table_is_valid is not changed
+                    /// It will be checked lazily on the first operation
+                    if (code == Coordination::Error::ZOK)
+                    {
+                        success = true;
+                        return;
+                    }
+
+                    /// We most likely created the path but got a timeout or disconnect
+                    if (code == Coordination::Error::ZNODEEXISTS && zk_retry.isRetry())
+                    {
+                        success = true;
+                        return;
+                    }
+
+                    if (code != Coordination::Error::ZNONODE)
+                        throw zkutil::KeeperException(
+                            code, "Failed to create table on path {} because a table with same UUID already exists", zk_root_path);
+
+                    /// ZNONODE means we dropped zk_tables_path but didn't finish drop completely
+                }
+
+                if (client->exists(zk_dropped_path))
+                {
+                    LOG_INFO(log, "Removing leftover nodes");
+
+                    bool drop_finished = false;
+                    if (zk_retry.isRetry() && metadata_drop_lock != nullptr && drop_lock_version != -1)
+                    {
+                        /// if we have leftover lock from previous try, we need to recreate the ephemeral with our session
+                        Coordination::Requests drop_lock_requests{
+                            zkutil::makeRemoveRequest(zk_dropped_lock_path, drop_lock_version),
+                            zkutil::makeCreateRequest(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral),
+                        };
+
+                        Coordination::Responses drop_lock_responses;
+                        auto lock_code = client->tryMulti(drop_lock_requests, drop_lock_responses);
+                        if (lock_code == Coordination::Error::ZBADVERSION)
+                        {
+                            LOG_INFO(log, "Someone else is removing leftover nodes");
+                            metadata_drop_lock->setAlreadyRemoved();
+                            metadata_drop_lock.reset();
+                            return;
+                        }
+
+                        if (drop_lock_responses[0]->error == Coordination::Error::ZNONODE)
+                        {
+                            /// someone else removed metadata nodes or the previous ephemeral node expired
+                            /// we will try creating dropped lock again to make sure
+                            metadata_drop_lock->setAlreadyRemoved();
+                            metadata_drop_lock.reset();
+                        }
+                        else if (lock_code == Coordination::Error::ZOK)
+                        {
+                            metadata_drop_lock->setAlreadyRemoved();
+                            metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client);
+                            drop_lock_version = -1;
+                            Coordination::Stat lock_stat;
+                            client->get(zk_dropped_lock_path, &lock_stat);
+                            drop_lock_version = lock_stat.version;
+                            if (!dropTable(client, metadata_drop_lock))
+                            {
+                                metadata_drop_lock.reset();
+                                return;
+                            }
+                            drop_finished = true;
+                        }
+                    }
+
+                    if (!drop_finished)
+                    {
+                        auto code = client->tryCreate(zk_dropped_lock_path, "", zkutil::CreateMode::Ephemeral);
+
+                        if (code == Coordination::Error::ZNONODE)
+                        {
+                            LOG_INFO(log, "Someone else removed leftover nodes");
+                        }
+                        else if (code == Coordination::Error::ZNODEEXISTS)
+                        {
+                            LOG_INFO(log, "Someone else is removing leftover nodes");
+                            return;
+                        }
+                        else if (code != Coordination::Error::ZOK)
+                        {
+                            throw Coordination::Exception::fromPath(code, zk_dropped_lock_path);
+                        }
+                        else
+                        {
+                            metadata_drop_lock = zkutil::EphemeralNodeHolder::existing(zk_dropped_lock_path, *client);
+                            drop_lock_version = -1;
+                            Coordination::Stat lock_stat;
+                            client->get(zk_dropped_lock_path, &lock_stat);
+                            drop_lock_version = lock_stat.version;
+                            if (!dropTable(client, metadata_drop_lock))
+                            {
+                                metadata_drop_lock.reset();
+                                return;
+                            }
+                        }
+                    }
+                }
+
+                Coordination::Requests create_requests{
+                    zkutil::makeCreateRequest(zk_metadata_path, metadata_string, zkutil::CreateMode::Persistent),
+                    zkutil::makeCreateRequest(zk_data_path, metadata_string, zkutil::CreateMode::Persistent),
+                    zkutil::makeCreateRequest(zk_tables_path, "", zkutil::CreateMode::Persistent),
+                    zkutil::makeCreateRequest(zk_table_path, "", zkutil::CreateMode::Persistent),
+                };
+
+                Coordination::Responses create_responses;
+                auto code = client->tryMulti(create_requests, create_responses);
+                if (code == Coordination::Error::ZNODEEXISTS)
+                {
+                    LOG_INFO(
+                        log, "It looks like a table on path {} was created by another server at the same moment, will retry", zk_root_path);
+                    return;
+                }
+                else if (code != Coordination::Error::ZOK)
+                {
+                    zkutil::KeeperMultiException::check(code, create_requests, create_responses);
+                }
+
+                table_status = TableStatus::VALID;
+                /// we are the first table created for the specified Keeper path, i.e. we are the first replica
+                success = true;
+            });
+
+        if (success)
+            return;
     }
 
-    throw Exception(ErrorCodes::BAD_ARGUMENTS,
-        "Cannot create metadata for table, because it is removed concurrently or because "
-        "of wrong zk_root_path ({})", zk_root_path);
+    throw Exception(
+        ErrorCodes::BAD_ARGUMENTS,
+        "Cannot create metadata for table, because it is removed concurrently or because "
+        "of wrong zk_root_path ({})",
+        zk_root_path);
 }
 
@@ -496,7 +606,7 @@ Pipe StorageKeeperMap::read(
     size_t max_block_size,
     size_t num_streams)
 {
-    checkTable<true>();
+    checkTable<true>(context_);
     storage_snapshot->check(column_names);
 
     FieldVectorPtr filtered_keys;
@@ -529,8 +639,8 @@ Pipe StorageKeeperMap::read(
     size_t num_keys = keys->size();
     size_t num_threads = std::min<size_t>(num_streams, keys->size());
 
-    assert(num_keys <= std::numeric_limits<uint32_t>::max());
-    assert(num_threads <= std::numeric_limits<uint32_t>::max());
+    chassert(num_keys <= std::numeric_limits<uint32_t>::max());
+    chassert(num_threads <= std::numeric_limits<uint32_t>::max());
 
     for (size_t thread_idx = 0; thread_idx < num_threads; ++thread_idx)
     {
@@ -539,29 +649,59 @@ Pipe StorageKeeperMap::read(

         using KeyContainer = typename KeyContainerPtr::element_type;
         pipes.emplace_back(std::make_shared<StorageKeeperMapSource<KeyContainer>>(
-            *this, sample_block, max_block_size, keys, keys->begin() + begin, keys->begin() + end, with_version_column));
+            *this, sample_block, max_block_size, keys, keys->begin() + begin, keys->begin() + end, with_version_column, context_));
         }
         return Pipe::unitePipes(std::move(pipes));
     };

-    auto client = getClient();
     if (all_scan)
-        return process_keys(std::make_shared<std::vector<std::string>>(client->getChildren(zk_data_path)));
+    {
+        const auto & settings = context_->getSettingsRef();
+        ZooKeeperRetriesControl zk_retry{
+            getName(),
+            getLogger(getName()),
+            ZooKeeperRetriesInfo{
+                settings.keeper_max_retries,
+                settings.keeper_retry_initial_backoff_ms,
+                settings.keeper_retry_max_backoff_ms},
+            context_->getProcessListElement()};
+
+        std::vector<std::string> children;
+        zk_retry.retryLoop([&]
+        {
+            auto client = getClient();
+            children = client->getChildren(zk_data_path);
+        });
+        return process_keys(std::make_shared<std::vector<std::string>>(std::move(children)));
+    }

     return process_keys(std::move(filtered_keys));
 }

 SinkToStoragePtr StorageKeeperMap::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context, bool /*async_insert*/)
 {
-    checkTable<true>();
+    checkTable<true>(local_context);
     return std::make_shared<StorageKeeperMapSink>(*this, metadata_snapshot->getSampleBlock(), local_context);
 }

-void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &)
+void StorageKeeperMap::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &)
 {
-    checkTable<true>();
-    auto client = getClient();
-    client->tryRemoveChildrenRecursive(zk_data_path, true);
+    checkTable<true>(local_context);
+    const auto & settings = local_context->getSettingsRef();
+    ZooKeeperRetriesControl zk_retry{
+        getName(),
+        getLogger(getName()),
+        ZooKeeperRetriesInfo{
+            settings.keeper_max_retries,
+            settings.keeper_retry_initial_backoff_ms,
+            settings.keeper_retry_max_backoff_ms},
+        local_context->getProcessListElement()};
+
+    zk_retry.retryLoop([&]
+    {
+        auto client = getClient();
+        client->tryRemoveChildrenRecursive(zk_data_path, true);
+    });
 }

 bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock)
@@ -605,7 +745,18 @@ bool StorageKeeperMap::dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::E

 void StorageKeeperMap::drop()
 {
-    checkTable<true>();
+    auto current_table_status = getTableStatus(getContext());
+    if (current_table_status == TableStatus::UNKNOWN)
+    {
+        static constexpr auto error_msg = "Failed to activate table because of connection issues. It will be activated "
+                                          "once a connection is established and metadata is verified";
+        throw Exception(ErrorCodes::INVALID_STATE, error_msg);
+    }
+
+    /// if only column metadata is wrong we can still drop the table correctly
+    if (current_table_status == TableStatus::INVALID_METADATA)
+        return;
+
     auto client = getClient();

     // we allow ZNONODE in case we got hardware error on previous drop
@@ -966,78 +1117,91 @@ UInt64 StorageKeeperMap::keysLimit() const
     return keys_limit;
 }

-std::optional<bool> StorageKeeperMap::isTableValid() const
+StorageKeeperMap::TableStatus StorageKeeperMap::getTableStatus(const ContextPtr & local_context) const
 {
     std::lock_guard lock{init_mutex};
-    if (table_is_valid.has_value())
-        return table_is_valid;
+    if (table_status != TableStatus::UNKNOWN)
+        return table_status;

     [&]
     {
         try
         {
-            auto client = getClient();
+            const auto & settings = local_context->getSettingsRef();
+            ZooKeeperRetriesControl zk_retry{
+                getName(),
+                getLogger(getName()),
+                ZooKeeperRetriesInfo{
+                    settings.keeper_max_retries,
+                    settings.keeper_retry_initial_backoff_ms,
+                    settings.keeper_retry_max_backoff_ms},
+                local_context->getProcessListElement()};

-            Coordination::Stat metadata_stat;
-            auto stored_metadata_string = client->get(zk_metadata_path, &metadata_stat);
-
-            if (metadata_stat.numChildren == 0)
-            {
-                table_is_valid = false;
-                return;
-            }
-
-            if (metadata_string != stored_metadata_string)
-            {
-                LOG_ERROR(
-                    log,
-                    "Table definition does not match to the one stored in the path {}. Stored definition: {}",
-                    zk_root_path,
-                    stored_metadata_string);
-                table_is_valid = false;
-                return;
-            }
-
-            // validate all metadata and data nodes are present
-            Coordination::Requests requests;
-            requests.push_back(zkutil::makeCheckRequest(zk_table_path, -1));
-            requests.push_back(zkutil::makeCheckRequest(zk_data_path, -1));
-            requests.push_back(zkutil::makeCheckRequest(zk_dropped_path, -1));
-
-            Coordination::Responses responses;
-            client->tryMulti(requests, responses);
-
-            table_is_valid = false;
-            if (responses[0]->error != Coordination::Error::ZOK)
-            {
-                LOG_ERROR(log, "Table node ({}) is missing", zk_table_path);
-                return;
-            }
-
-            if (responses[1]->error != Coordination::Error::ZOK)
-            {
-                LOG_ERROR(log, "Data node ({}) is missing", zk_data_path);
-                return;
-            }
-
-            if (responses[2]->error == Coordination::Error::ZOK)
-            {
-                LOG_ERROR(log, "Tables with root node {} are being dropped", zk_root_path);
-                return;
-            }
-
-            table_is_valid = true;
+            zk_retry.retryLoop([&]
+            {
+                auto client = getClient();
+
+                Coordination::Stat metadata_stat;
+                auto stored_metadata_string = client->get(zk_metadata_path, &metadata_stat);
+
+                if (metadata_stat.numChildren == 0)
+                {
+                    table_status = TableStatus::INVALID_KEEPER_STRUCTURE;
+                    return;
+                }
+
+                if (metadata_string != stored_metadata_string)
+                {
+                    LOG_ERROR(
+                        log,
+                        "Table definition does not match to the one stored in the path {}. Stored definition: {}",
+                        zk_root_path,
+                        stored_metadata_string);
+                    table_status = TableStatus::INVALID_METADATA;
+                    return;
+                }
+
+                // validate all metadata and data nodes are present
+                Coordination::Requests requests;
+                requests.push_back(zkutil::makeCheckRequest(zk_table_path, -1));
+                requests.push_back(zkutil::makeCheckRequest(zk_data_path, -1));
+                requests.push_back(zkutil::makeCheckRequest(zk_dropped_path, -1));
+
+                Coordination::Responses responses;
+                client->tryMulti(requests, responses);
+
+                table_status = TableStatus::INVALID_KEEPER_STRUCTURE;
+                if (responses[0]->error != Coordination::Error::ZOK)
+                {
+                    LOG_ERROR(log, "Table node ({}) is missing", zk_table_path);
+                    return;
+                }
+
+                if (responses[1]->error != Coordination::Error::ZOK)
+                {
+                    LOG_ERROR(log, "Data node ({}) is missing", zk_data_path);
+                    return;
+                }
+
+                if (responses[2]->error == Coordination::Error::ZOK)
+                {
+                    LOG_ERROR(log, "Tables with root node {} are being dropped", zk_root_path);
+                    return;
+                }
+
+                table_status = TableStatus::VALID;
+            });
         }
         catch (const Coordination::Exception & e)
         {
             tryLogCurrentException(log);

             if (!Coordination::isHardwareError(e.code))
-                table_is_valid = false;
+                table_status = TableStatus::INVALID_KEEPER_STRUCTURE;
         }
     }();

-    return table_is_valid;
+    return table_status;
 }

 Chunk StorageKeeperMap::getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray<UInt8> & null_map, const Names &) const
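The retry wrapper that appears in nearly every hunk above has one shape: build a retries controller from the keeper_max_retries / keeper_retry_initial_backoff_ms / keeper_retry_max_backoff_ms settings, then run the whole Keeper round-trip inside retryLoop, re-acquiring the client on each attempt so a fresh session is used after a disconnect. A standalone C++ sketch of that shape with exponential backoff; RetriesInfo and retryLoop here are simplified stand-ins for the ZooKeeperRetriesInfo / ZooKeeperRetriesControl types used in the diff, not their real definitions:

#include <algorithm>
#include <chrono>
#include <functional>
#include <stdexcept>
#include <thread>

// Illustrative stand-in for ZooKeeperRetriesInfo: max attempts plus backoff bounds.
struct RetriesInfo
{
    unsigned max_retries = 20;
    std::chrono::milliseconds initial_backoff{100};
    std::chrono::milliseconds max_backoff{10000};
};

// Illustrative stand-in for ZooKeeperRetriesControl::retryLoop: rerun the whole
// callback on failure, doubling the sleep between attempts up to max_backoff.
void retryLoop(const RetriesInfo & info, const std::function<void()> & body)
{
    auto backoff = info.initial_backoff;
    for (unsigned attempt = 0;; ++attempt)
    {
        try
        {
            body();  // the callback re-acquires its client, so a fresh session is used
            return;
        }
        catch (const std::exception &)
        {
            if (attempt >= info.max_retries)
                throw;  // retries exhausted: surface the last error
            std::this_thread::sleep_for(backoff);
            backoff = std::min(backoff * 2, info.max_backoff);
        }
    }
}

A call site would then look like retryLoop(info, [&]{ auto client = get_client(); children = client->getChildren(path); });, mirroring the pattern the diff applies to read, truncate, getBySerializedKeys and mutate.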
@@ -1050,10 +1214,11 @@ Chunk StorageKeeperMap::getByKeys(const ColumnsWithTypeAndName & keys, PaddedPOD
     if (raw_keys.size() != keys[0].column->size())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Assertion failed: {} != {}", raw_keys.size(), keys[0].column->size());

-    return getBySerializedKeys(raw_keys, &null_map, /* version_column */ false);
+    return getBySerializedKeys(raw_keys, &null_map, /* version_column */ false, getContext());
 }

-Chunk StorageKeeperMap::getBySerializedKeys(const std::span<const std::string> keys, PaddedPODArray<UInt8> * null_map, bool with_version) const
+Chunk StorageKeeperMap::getBySerializedKeys(
+    const std::span<const std::string> keys, PaddedPODArray<UInt8> * null_map, bool with_version, const ContextPtr & local_context) const
 {
     Block sample_block = getInMemoryMetadataPtr()->getSampleBlock();
     MutableColumns columns = sample_block.cloneEmptyColumns();
@@ -1070,17 +1235,27 @@ Chunk StorageKeeperMap::getBySerializedKeys(const std::span<const std::string> k
         null_map->resize_fill(keys.size(), 1);
     }

-    auto client = getClient();

     Strings full_key_paths;
     full_key_paths.reserve(keys.size());

     for (const auto & key : keys)
-    {
         full_key_paths.emplace_back(fullPathForKey(key));
-    }

-    auto values = client->tryGet(full_key_paths);
+    const auto & settings = local_context->getSettingsRef();
+    ZooKeeperRetriesControl zk_retry{
+        getName(),
+        getLogger(getName()),
+        ZooKeeperRetriesInfo{
+            settings.keeper_max_retries,
+            settings.keeper_retry_initial_backoff_ms,
+            settings.keeper_retry_max_backoff_ms},
+        local_context->getProcessListElement()};
+
+    zkutil::ZooKeeper::MultiTryGetResponse values;
+    zk_retry.retryLoop([&]{
+        auto client = getClient();
+        values = client->tryGet(full_key_paths);
+    });

     for (size_t i = 0; i < keys.size(); ++i)
     {
@@ -1153,14 +1328,14 @@ void StorageKeeperMap::checkMutationIsPossible(const MutationCommands & commands

 void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr local_context)
 {
-    checkTable<true>();
+    checkTable<true>(local_context);

     if (commands.empty())
         return;

     bool strict = local_context->getSettingsRef().keeper_map_strict_mode;

-    assert(commands.size() == 1);
+    chassert(commands.size() == 1);

     auto metadata_snapshot = getInMemoryMetadataPtr();
     auto storage = getStorageID();
@@ -1168,16 +1343,16 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca

     if (commands.front().type == MutationCommand::Type::DELETE)
     {
-        MutationsInterpreter::Settings settings(true);
-        settings.return_all_columns = true;
-        settings.return_mutated_rows = true;
+        MutationsInterpreter::Settings mutation_settings(true);
+        mutation_settings.return_all_columns = true;
+        mutation_settings.return_mutated_rows = true;

         auto interpreter = std::make_unique<MutationsInterpreter>(
             storage_ptr,
             metadata_snapshot,
             commands,
             local_context,
-            settings);
+            mutation_settings);

         auto pipeline = QueryPipelineBuilder::getPipeline(interpreter->execute());
         PullingPipelineExecutor executor(pipeline);
@@ -1186,8 +1361,6 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca
         auto primary_key_pos = header.getPositionByName(primary_key);
         auto version_position = header.getPositionByName(std::string{version_column_name});

-        auto client = getClient();
-
         Block block;
         while (executor.pull(block))
         {
@@ -1215,7 +1388,23 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca
         }

         Coordination::Responses responses;
-        auto status = client->tryMulti(delete_requests, responses, /* check_session_valid */ true);
+
+        const auto & settings = local_context->getSettingsRef();
+        ZooKeeperRetriesControl zk_retry{
+            getName(),
+            getLogger(getName()),
+            ZooKeeperRetriesInfo{
+                settings.keeper_max_retries,
+                settings.keeper_retry_initial_backoff_ms,
+                settings.keeper_retry_max_backoff_ms},
+            local_context->getProcessListElement()};
+
+        Coordination::Error status;
+        zk_retry.retryLoop([&]
+        {
+            auto client = getClient();
+            status = client->tryMulti(delete_requests, responses, /* check_session_valid */ true);
+        });

         if (status == Coordination::Error::ZOK)
             return;
@@ -1227,16 +1416,21 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca

             for (const auto & delete_request : delete_requests)
             {
-                auto code = client->tryRemove(delete_request->getPath());
-                if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNONODE)
-                    throw zkutil::KeeperException::fromPath(code, delete_request->getPath());
+                zk_retry.retryLoop([&]
+                {
+                    auto client = getClient();
+                    status = client->tryRemove(delete_request->getPath());
+                });
+
+                if (status != Coordination::Error::ZOK && status != Coordination::Error::ZNONODE)
+                    throw zkutil::KeeperException::fromPath(status, delete_request->getPath());
             }
         }

         return;
     }

-    assert(commands.front().type == MutationCommand::Type::UPDATE);
+    chassert(commands.front().type == MutationCommand::Type::UPDATE);
     if (commands.front().column_to_update_expression.contains(primary_key))
         throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key cannot be updated (cannot update column {})", primary_key);

@@ -54,7 +54,8 @@ public:
     Names getPrimaryKey() const override { return {primary_key}; }

     Chunk getByKeys(const ColumnsWithTypeAndName & keys, PaddedPODArray<UInt8> & null_map, const Names &) const override;
-    Chunk getBySerializedKeys(std::span<const std::string> keys, PaddedPODArray<UInt8> * null_map, bool with_version) const;
+    Chunk getBySerializedKeys(
+        std::span<const std::string> keys, PaddedPODArray<UInt8> * null_map, bool with_version, const ContextPtr & local_context) const;

     Block getSampleBlock(const Names &) const override;

@@ -77,10 +78,10 @@ public:
     UInt64 keysLimit() const;

     template <bool throw_on_error>
-    void checkTable() const
+    void checkTable(const ContextPtr & local_context) const
     {
-        auto is_table_valid = isTableValid();
-        if (!is_table_valid.has_value())
+        auto current_table_status = getTableStatus(local_context);
+        if (table_status == TableStatus::UNKNOWN)
         {
             static constexpr auto error_msg = "Failed to activate table because of connection issues. It will be activated "
                                               "once a connection is established and metadata is verified";
@@ -93,10 +94,10 @@ public:
             }
         }

-        if (!*is_table_valid)
+        if (current_table_status != TableStatus::VALID)
         {
             static constexpr auto error_msg
-                = "Failed to activate table because of invalid metadata in ZooKeeper. Please DETACH table";
+                = "Failed to activate table because of invalid metadata in ZooKeeper. Please DROP/DETACH table";
             if constexpr (throw_on_error)
                 throw Exception(ErrorCodes::INVALID_STATE, error_msg);
             else
@@ -110,7 +111,15 @@ public:
 private:
     bool dropTable(zkutil::ZooKeeperPtr zookeeper, const zkutil::EphemeralNodeHolder::Ptr & metadata_drop_lock);

-    std::optional<bool> isTableValid() const;
+    enum class TableStatus : uint8_t
+    {
+        UNKNOWN,
+        INVALID_METADATA,
+        INVALID_KEEPER_STRUCTURE,
+        VALID
+    };
+
+    TableStatus getTableStatus(const ContextPtr & context) const;

     void restoreDataImpl(
         const BackupPtr & backup,
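The enum above replaces the old tri-state std::optional<bool>; splitting "invalid" into two states is what lets drop() proceed when only the stored metadata mismatches. A free-standing sketch of how the states map onto the old boolean; the canDrop/canServeQueries helpers are illustrative, not part of the diff:

#include <cstdint>

// How the new states line up with the old std::optional<bool> table_is_valid:
//   UNKNOWN                  <- std::nullopt: no verdict yet (e.g. Keeper unreachable)
//   VALID                    <- true
//   INVALID_METADATA         <- false, but only the stored table definition differs,
//                               so DROP can still clean up safely
//   INVALID_KEEPER_STRUCTURE <- false with required Keeper nodes missing or being dropped
enum class TableStatus : uint8_t { UNKNOWN, INVALID_METADATA, INVALID_KEEPER_STRUCTURE, VALID };

// Sketch of the decisions made in the .cpp hunks (free-standing, not member functions):
bool canDrop(TableStatus s) { return s != TableStatus::UNKNOWN; }        // drop() requirement
bool canServeQueries(TableStatus s) { return s == TableStatus::VALID; }  // checkTable() requirement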
@@ -142,7 +151,8 @@ private:
     mutable zkutil::ZooKeeperPtr zookeeper_client{nullptr};

     mutable std::mutex init_mutex;
-    mutable std::optional<bool> table_is_valid;
+
+    mutable TableStatus table_status{TableStatus::UNKNOWN};

     LoggerPtr log;
 };

@@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c
 target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms)

 clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
-target_link_libraries (columns_description_fuzzer PRIVATE dbms)
+target_link_libraries (columns_description_fuzzer PRIVATE dbms clickhouse_functions)
@@ -1,5 +1,7 @@
 #include <Storages/ColumnsDescription.h>

+#include <iostream>
+
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
 {
@@ -0,0 +1,3 @@
+<clickhouse>
+    <database_catalog_drop_table_concurrency>256</database_catalog_drop_table_concurrency>
+</clickhouse>
@@ -21,6 +21,7 @@ ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/blob_storage_log.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/custom_settings_prefixes.xml $DEST_SERVER_PATH/config.d/
+ln -sf $SRC_PATH/config.d/database_catalog_drop_table_concurrency.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/enable_access_control_improvements.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/macros.xml $DEST_SERVER_PATH/config.d/
 ln -sf $SRC_PATH/config.d/secure_ports.xml $DEST_SERVER_PATH/config.d/
@@ -142,7 +142,7 @@ of parallel workers for `pytest-xdist`.
 $ export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=$HOME/ClickHouse/programs/server/
 $ export CLICKHOUSE_TESTS_SERVER_BIN_PATH=$HOME/ClickHouse/programs/clickhouse
 $ export CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH=$HOME/ClickHouse/programs/clickhouse-odbc-bridge
-$ ./runner 'test_storage_s3_queue/test.py::test_max_set_age -- --count 10 -n 5'
+$ ./runner test_storage_s3_queue/test.py::test_max_set_age --count 10 -n 5
 Start tests
 =============================================================================== test session starts ================================================================================
 platform linux -- Python 3.10.12, pytest-7.4.4, pluggy-1.5.0 -- /usr/bin/python3
@@ -2,7 +2,7 @@ version: '2.3'

 services:
   minio1:
-    image: minio/minio:RELEASE.2023-09-30T07-02-29Z
+    image: minio/minio:RELEASE.2024-07-31T05-46-26Z
     volumes:
       - data1-1:/data1
       - ${MINIO_CERTS_DIR:-}:/certs
@@ -3922,7 +3922,11 @@ class ClickHouseInstance:
         )

     def contains_in_log(
-        self, substring, from_host=False, filename="clickhouse-server.log"
+        self,
+        substring,
+        from_host=False,
+        filename="clickhouse-server.log",
+        exclusion_substring="",
     ):
         if from_host:
             # We check fist file exists but want to look for all rotated logs as well
@@ -3930,7 +3934,7 @@ class ClickHouseInstance:
                 [
                     "bash",
                     "-c",
-                    f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true',
+                    f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* | ( [ -z "{exclusion_substring}" ] && cat || grep -v "${exclusion_substring}" ) || true',
                 ]
             )
         else:
@@ -3938,7 +3942,7 @@ class ClickHouseInstance:
                 [
                     "bash",
                     "-c",
-                    f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true',
+                    f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} | ( [ -z "{exclusion_substring}" ] && cat || grep -v "${exclusion_substring}" ) || true',
                 ]
             )
         return len(result) > 0
@@ -1054,9 +1054,12 @@ def test_mutation():
     backup_name = new_backup_name()
     node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    assert not has_mutation_in_backup("0000000000", backup_name, "default", "tbl")
+    # mutation #0000000000: "UPDATE x=x+1 WHERE 1" could already finish before starting the backup
+    # mutation #0000000001: "UPDATE x=x+1+sleep(3) WHERE 1"
     assert has_mutation_in_backup("0000000001", backup_name, "default", "tbl")
+    # mutation #0000000002: "UPDATE x=x+1+sleep(3) WHERE 1"
     assert has_mutation_in_backup("0000000002", backup_name, "default", "tbl")
+    # mutation #0000000003: not expected
     assert not has_mutation_in_backup("0000000003", backup_name, "default", "tbl")

     node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
@@ -0,0 +1,16 @@
+<clickhouse>
+    <remote_servers>
+        <default>
+            <shard>
+                <replica>
+                    <host>writing_node</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>reading_node</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </default>
+    </remote_servers>
+</clickhouse>
@@ -0,0 +1,3 @@
+<clickhouse>
+    <disable_insertion_and_mutation>true</disable_insertion_and_mutation>
+</clickhouse>
@@ -0,0 +1,3 @@
+<clickhouse>
+    <disable_insertion_and_mutation>false</disable_insertion_and_mutation>
+</clickhouse>
@@ -0,0 +1,75 @@
+import pytest
+from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+import time
+
+cluster = ClickHouseCluster(__file__)
+
+writing_node = cluster.add_instance(
+    "writing_node",
+    main_configs=["config/writing_node.xml", "config/cluster.xml"],
+    with_zookeeper=True,
+    with_minio=True,
+    stay_alive=True,
+    macros={"shard": 1, "replica": 1},
+)
+reading_node = cluster.add_instance(
+    "reading_node",
+    main_configs=["config/reading_node.xml", "config/cluster.xml"],
+    with_zookeeper=True,
+    with_minio=True,
+    stay_alive=True,
+    macros={"shard": 1, "replica": 2},
+)
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def test_disable_insertion_and_mutation(started_cluster):
+    writing_node.query(
+        """CREATE TABLE my_table on cluster default (key UInt64, value String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{shard}/default.my_table', '{replica}') ORDER BY key partition by (key % 5) """
+    )
+
+    assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
+        "INSERT INTO my_table VALUES (1, 'hello')"
+    )
+
+    assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
+        "INSERT INTO my_table SETTINGS async_insert = 1 VALUES (1, 'hello')"
+    )
+
+    assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
+        "ALTER TABLE my_table delete where 1"
+    )
+
+    assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
+        "ALTER table my_table update key = 1 where 1"
+    )
+
+    assert "QUERY_IS_PROHIBITED" in reading_node.query_and_get_error(
+        "ALTER TABLE my_table drop partition 0"
+    )
+
+    reading_node.query("SELECT * from my_table")
+    writing_node.query("INSERT INTO my_table VALUES (1, 'hello')")
+    writing_node.query("ALTER TABLE my_table delete where 1")
+    writing_node.query("ALter table my_table update value = 'no hello' where 1")
+
+    reading_node.query("ALTER TABLE my_table ADD COLUMN new_column UInt64")
+    writing_node.query("SELECT new_column from my_table")
+    reading_node.query("SELECT new_column from my_table")
+
+    reading_node.query("ALter Table my_table MODIFY COLUMN new_column String")
+
+    assert "new_column\tString" in reading_node.query("DESC my_table")
+
+    assert "new_column\tString" in writing_node.query("DESC my_table")
@@ -55,7 +55,7 @@ def test_single_file(started_cluster, cluster):
     path = get_dist_path(cluster, "distr_1", 1)
     query = f"select * from file('{path}/1.bin', 'Distributed')"
     out = node.exec_in_container(
-        ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
+        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
     )

     assert out == "1\ta\n2\tbb\n3\tccc\n"
@@ -65,7 +65,7 @@ def test_single_file(started_cluster, cluster):
         select * from t;
     """
     out = node.exec_in_container(
-        ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
+        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
    )

     assert out == "1\ta\n2\tbb\n3\tccc\n"
@@ -106,7 +106,7 @@ def test_two_files(started_cluster, cluster):
         select * from t order by x;
     """
     out = node.exec_in_container(
-        ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
+        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
     )

     assert out == "0\t_\n1\ta\n2\tbb\n3\tccc\n"
@@ -141,7 +141,7 @@ def test_single_file_old(started_cluster, cluster):
         select * from t;
     """
     out = node.exec_in_container(
-        ["/usr/bin/clickhouse", "local", "--multiquery", "--stacktrace", "-q", query]
+        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
     )

     assert out == "1\ta\n2\tbb\n3\tccc\n"
tests/integration/test_keeper_map/configs/keeper_retries.xml (new file, 14 lines)
@@ -0,0 +1,14 @@
+<clickhouse>
+    <profiles>
+        <default>
+            <insert_keeper_max_retries>0</insert_keeper_max_retries>
+            <keeper_max_retries>0</keeper_max_retries>
+        </default>
+    </profiles>
+    <users>
+        <default>
+            <password></password>
+            <profile>default</profile>
+        </default>
+    </users>
+</clickhouse>
@@ -10,6 +10,7 @@ cluster = ClickHouseCluster(__file__)
 node = cluster.add_instance(
     "node",
     main_configs=["configs/enable_keeper_map.xml"],
+    user_configs=["configs/keeper_retries.xml"],
     with_zookeeper=True,
     stay_alive=True,
 )
@@ -46,7 +47,10 @@ def assert_keeper_exception_after_partition(query):
     with PartitionManager() as pm:
         pm.drop_instance_zk_connections(node)
         try:
-            error = node.query_and_get_error_with_retry(query, sleep_time=1)
+            error = node.query_and_get_error_with_retry(
+                query,
+                sleep_time=1,
+            )
             assert "Coordination::Exception" in error
         except:
             print_iptables_rules()
@@ -63,6 +67,7 @@ def run_query(query):


 def test_keeper_map_without_zk(started_cluster):
+    run_query("DROP TABLE IF EXISTS test_keeper_map_without_zk SYNC")
     assert_keeper_exception_after_partition(
         "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_without_zk') PRIMARY KEY(key);"
     )
@@ -84,7 +89,8 @@ def test_keeper_map_without_zk(started_cluster):
     node.restart_clickhouse(60)
     try:
         error = node.query_and_get_error_with_retry(
-            "SELECT * FROM test_keeper_map_without_zk", sleep_time=1
+            "SELECT * FROM test_keeper_map_without_zk",
+            sleep_time=1,
         )
         assert "Failed to activate table because of connection issues" in error
     except:
@@ -101,12 +107,12 @@ def test_keeper_map_without_zk(started_cluster):
     )
     assert "Failed to activate table because of invalid metadata in ZooKeeper" in error

-    node.query("DETACH TABLE test_keeper_map_without_zk")
-
     client.stop()


 def test_keeper_map_with_failed_drop(started_cluster):
+    run_query("DROP TABLE IF EXISTS test_keeper_map_with_failed_drop SYNC")
+    run_query("DROP TABLE IF EXISTS test_keeper_map_with_failed_drop_another SYNC")
     run_query(
         "CREATE TABLE test_keeper_map_with_failed_drop (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_with_failed_drop') PRIMARY KEY(key);"
     )
@@ -0,0 +1,3 @@
+<clickhouse>
+    <keeper_map_path_prefix>/test_keeper_map</keeper_map_path_prefix>
+</clickhouse>
@@ -0,0 +1,7 @@
+<clickhouse>
+    <zookeeper>
+        <enable_fault_injections_during_startup>1</enable_fault_injections_during_startup>
+        <send_fault_probability>0.005</send_fault_probability>
+        <recv_fault_probability>0.005</recv_fault_probability>
+    </zookeeper>
+</clickhouse>
@@ -0,0 +1,14 @@
+<clickhouse>
+    <profiles>
+        <default>
+            <keeper_max_retries>20</keeper_max_retries>
+            <keeper_retry_max_backoff_ms>10000</keeper_retry_max_backoff_ms>
+        </default>
+    </profiles>
+    <users>
+        <default>
+            <password></password>
+            <profile>default</profile>
+        </default>
+    </users>
+</clickhouse>
tests/integration/test_keeper_map_retries/test.py (new file, 75 lines)
@@ -0,0 +1,75 @@
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+import os
+
+CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
+
+cluster = ClickHouseCluster(__file__)
+
+node = cluster.add_instance(
+    "node",
+    main_configs=["configs/enable_keeper_map.xml"],
+    user_configs=["configs/keeper_retries.xml"],
+    with_zookeeper=True,
+    stay_alive=True,
+)
+
+
+def start_clean_clickhouse():
+    # remove fault injection if present
+    if "fault_injection.xml" in node.exec_in_container(
+        ["bash", "-c", "ls /etc/clickhouse-server/config.d"]
+    ):
+        print("Removing fault injection")
+        node.exec_in_container(
+            ["bash", "-c", "rm /etc/clickhouse-server/config.d/fault_injection.xml"]
+        )
+        node.restart_clickhouse()
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.start()
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def repeat_query(query, repeat):
+    for _ in range(repeat):
+        node.query(
+            query,
+        )
+
+
+def test_queries(started_cluster):
+    start_clean_clickhouse()
+
+    node.query("DROP TABLE IF EXISTS keeper_map_retries SYNC")
+    node.stop_clickhouse()
+    node.copy_file_to_container(
+        os.path.join(CONFIG_DIR, "fault_injection.xml"),
+        "/etc/clickhouse-server/config.d/fault_injection.xml",
+    )
+    node.start_clickhouse()
+
+    repeat_count = 10
+
+    node.query(
+        "CREATE TABLE keeper_map_retries (a UInt64, b UInt64) Engine=KeeperMap('/keeper_map_retries') PRIMARY KEY a",
+    )
+
+    repeat_query(
+        "INSERT INTO keeper_map_retries SELECT number, number FROM numbers(500)",
+        repeat_count,
+    )
+    repeat_query("SELECT * FROM keeper_map_retries", repeat_count)
+    repeat_query(
+        "ALTER TABLE keeper_map_retries UPDATE b = 3 WHERE a > 2", repeat_count
+    )
+    repeat_query("ALTER TABLE keeper_map_retries DELETE WHERE a > 2", repeat_count)
+    repeat_query("TRUNCATE keeper_map_retries", repeat_count)
@@ -13,6 +13,7 @@ node = cluster.add_instance(
     with_zookeeper=True,
     with_azurite=True,
 )
+base_search_query = "SELECT COUNT() FROM system.query_log WHERE query LIKE "


 @pytest.fixture(scope="module", autouse=True)
@@ -35,7 +36,7 @@ def check_logs(must_contain=[], must_not_contain=[]):
             .replace("]", "\\]")
             .replace("*", "\\*")
         )
-        assert node.contains_in_log(escaped_str)
+        assert node.contains_in_log(escaped_str, exclusion_substring=base_search_query)

     for str in must_not_contain:
         escaped_str = (
@@ -44,7 +45,9 @@ def check_logs(must_contain=[], must_not_contain=[]):
             .replace("]", "\\]")
             .replace("*", "\\*")
         )
-        assert not node.contains_in_log(escaped_str)
+        assert not node.contains_in_log(
+            escaped_str, exclusion_substring=base_search_query
+        )

     for str in must_contain:
         escaped_str = str.replace("'", "\\'")
@@ -60,7 +63,7 @@ def system_query_log_contains_search_pattern(search_pattern):
     return (
         int(
             node.query(
-                f"SELECT COUNT() FROM system.query_log WHERE query LIKE '%{search_pattern}%'"
+                f"{base_search_query}'%{search_pattern}%' AND query NOT LIKE '{base_search_query}%'"
             ).strip()
         )
         >= 1
@@ -105,7 +108,6 @@ def test_create_alter_user():
         must_not_contain=[
             password,
             "IDENTIFIED BY",
-            "IDENTIFIED BY",
             "IDENTIFIED WITH plaintext_password BY",
         ],
     )
@@ -366,10 +368,7 @@ def test_table_functions():
         f"remoteSecure(named_collection_6, addresses_expr = '127.{{2..11}}', database = 'default', table = 'remote_table', user = 'remote_user', password = '{password}')",
         f"s3('http://minio1:9001/root/data/test9.csv.gz', 'NOSIGN', 'CSV')",
         f"s3('http://minio1:9001/root/data/test10.csv.gz', 'minio', '{password}')",
-        (
-            f"deltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
-            "DNS_ERROR",
-        ),
+        f"deltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
         f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple.csv', 'CSV')",
         f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple_1.csv', 'CSV', 'none')",
         f"azureBlobStorage('{azure_conn_string}', 'cont', 'test_simple_2.csv', 'CSV', 'none', 'auto')",
@@ -71,7 +71,7 @@ def test_first_or_random(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -99,7 +99,7 @@ def test_first_or_random(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -127,7 +127,7 @@ def test_first_or_random(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -161,7 +161,7 @@ def test_in_order(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -189,7 +189,7 @@ def test_in_order(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -217,7 +217,7 @@ def test_in_order(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -251,7 +251,7 @@ def test_nearest_hostname(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -279,7 +279,7 @@ def test_nearest_hostname(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -307,7 +307,7 @@ def test_nearest_hostname(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -341,7 +341,7 @@ def test_hostname_levenshtein_distance(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo1_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo1_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -369,7 +369,7 @@ def test_hostname_levenshtein_distance(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo2_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo2_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -397,7 +397,7 @@ def test_hostname_levenshtein_distance(started_cluster):
             [
                 "bash",
                 "-c",
-                "lsof -a -i4 -i6 -itcp -w | grep 'testzookeeperconfigloadbalancing_zoo3_1.*testzookeeperconfigloadbalancing_default:2181' | grep ESTABLISHED | wc -l",
+                "lsof -a -i4 -i6 -itcp -w | grep -P 'testzookeeperconfigloadbalancing_(gw\\d+_)?zoo3_1.*testzookeeperconfigloadbalancing_(gw\\d+_)?default:2181' | grep ESTABLISHED | wc -l",
             ],
             privileged=True,
             user="root",
@@ -1,3 +1,4 @@
+unnamed columns in tuple
 <?xml version='1.0' encoding='UTF-8' ?>
 <result>
         <meta>
@@ -54,3 +55,43 @@
         </extremes>
         <rows>1</rows>
 </result>
+named columns in tuple
+<?xml version='1.0' encoding='UTF-8' ?>
+<result>
+        <meta>
+                <columns>
+                        <column>
+                                <name>s</name>
+                                <type>String</type>
+                        </column>
+                        <column>
+                                <name>time</name>
+                                <type>DateTime</type>
+                        </column>
+                        <column>
+                                <name>tpl</name>
+                                <type>Tuple(String, DateTime)</type>
+                        </column>
+                </columns>
+        </meta>
+        <data>
+                <row>
+                        <s>Hello &amp; world</s>
+                        <time>2001-02-03 04:05:06</time>
+                        <tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
+                </row>
+        </data>
+        <extremes>
+                <min>
+                        <s>Hello &amp; world</s>
+                        <time>2001-02-03 04:05:06</time>
+                        <tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
+                </min>
+                <max>
+                        <s>Hello &amp; world</s>
+                        <time>2001-02-03 04:05:06</time>
+                        <tpl><tuple><elem>Hello &amp; world</elem><elem>2001-02-03 04:05:06</elem></tuple></tpl>
+                </max>
+        </extremes>
+        <rows>1</rows>
+</result>
@@ -1,2 +1,5 @@
 SET output_format_write_statistics = 0;
+SELECT 'unnamed columns in tuple';
 SELECT 'Hello & world' AS s, 'Hello\n<World>', toDateTime('2001-02-03 04:05:06') AS time, arrayMap(x -> toString(x), range(10)) AS arr, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;
+SELECT 'named columns in tuple';
+SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML;

Binary file not shown.
@@ -9,3 +9,8 @@ SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, a
 SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON;
 SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact;
 SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML;
+
+SET enable_named_columns_in_function_tuple = 1;
+
+SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes SETTINGS allow_experimental_analyzer=1;
+SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes SETTINGS allow_experimental_analyzer=1;
@@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --multiquery --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true;
+$CLICKHOUSE_CLIENT --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true;
@@ -25,7 +25,7 @@ ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_4&se
 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_5&session_timeout=60" --data-binary "SELECT 1"
 
 echo "Sessions are local per user:"
-${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;"
+${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS test_00463; CREATE USER test_00463; GRANT ALL ON *.* TO test_00463;"
 
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6&session_timeout=600" --data-binary "CREATE TEMPORARY TABLE t (s String)"
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "INSERT INTO t VALUES ('Hello')"
@@ -37,7 +37,7 @@ ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${C
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t"
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t"
 
-${CLICKHOUSE_CLIENT} --multiquery --query "DROP USER test_00463";
+${CLICKHOUSE_CLIENT} --query "DROP USER test_00463";
 
 echo "And cannot be accessed for a non-existent user:"
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&user=test_00463&session_id=${CLICKHOUSE_DATABASE}_6" --data-binary "SELECT * FROM t" | grep -c -F 'Exception'
@@ -59,7 +59,7 @@ done
 
 echo "A session successfully expire after a timeout and the session's temporary table shadows the permanent table:"
 # An infinite loop is required to make the test reliable. We will check that the timeout corresponds to the observed time at least once
-${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');"
+${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS t; CREATE TABLE t (s String) ENGINE = Memory; INSERT INTO t VALUES ('World');"
 while true
 do
 (
@@ -70,7 +70,7 @@ do
     ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_8" --data-binary "SELECT * FROM t"
 ) | tr -d '\n' | grep -F 'HelloWorld' && break || sleep 1
 done
-${CLICKHOUSE_CLIENT} --multiquery --query "DROP TABLE t"
+${CLICKHOUSE_CLIENT} --query "DROP TABLE t"
 
 echo "A session cannot be used by concurrent connections:"
 
@@ -83,5 +83,5 @@ do
 done
 
 ${CLICKHOUSE_CURL} -sS -X POST "${CLICKHOUSE_URL}&session_id=${CLICKHOUSE_DATABASE}_9" --data-binary "SELECT 1" | grep -c -F 'SESSION_IS_LOCKED'
-${CLICKHOUSE_CLIENT} --multiquery --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null";
+${CLICKHOUSE_CLIENT} --query "KILL QUERY WHERE query_id = '${CLICKHOUSE_DATABASE}_9' SYNC FORMAT Null";
 wait
@@ -8,8 +8,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=0 | grep value
 $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=1 | grep value
 
-$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
-$CLICKHOUSE_CLIENT --readonly=1 --multiquery --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
+$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=1 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
+$CLICKHOUSE_CLIENT --readonly=1 --query="set output_format_json_quote_64bit_integers=0 ; select toUInt64(pow(2, 63)) as value format JSON" --server_logs_file=/dev/null 2>&1 | grep -o -q 'value\|Cannot modify .* setting in readonly mode' && echo "OK" || echo "FAIL"
 
 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=1" | grep value
 ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=SELECT+toUInt64(pow(2,+63))+as+value+format+JSON&output_format_json_quote_64bit_integers=0" | grep value
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS pk_in_tuple_perf;
 CREATE TABLE pk_in_tuple_perf
 (
@@ -27,7 +27,7 @@ $CLICKHOUSE_CLIENT --query "$query FORMAT JSON" | grep "rows_read"
 
 ## Test with non-const args in tuple
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS pk_in_tuple_perf_non_const;
 CREATE TABLE pk_in_tuple_perf_non_const
 (
@@ -22,7 +22,7 @@ echo '"Hello, world"; 123; "2016-01-01"
 "Hello, ""world"""; "456"; 2016-01-02;
 Hello "world"; 789 ;2016-01-03
 "Hello
-world"; 100; 2016-01-04;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
+world"; 100; 2016-01-04;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";
 $CLICKHOUSE_CLIENT --format_csv_delimiter=";" --query="SELECT * FROM csv ORDER BY d FORMAT CSV";
@@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s1 String, s2 String) ENGINE = Mem
 
 echo 'abc,def;hello;
 hello; world;
-"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
+"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSV";
 
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM csv";
@@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s1 String, s2 String) ENGINE = Mem
 echo '"s1";"s2"
 abc,def;hello;
 hello; world;
-"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSVWithNames";
+"hello ""world""";abc,def;' | $CLICKHOUSE_CLIENT --query="SET format_csv_delimiter=';'; INSERT INTO csv FORMAT CSVWithNames";
 
 $CLICKHOUSE_CLIENT --format_csv_delimiter=";" --query="SELECT * FROM csv FORMAT CSV";
 $CLICKHOUSE_CLIENT --format_csv_delimiter="," --query="SELECT * FROM csv FORMAT CSV";
@@ -18,7 +18,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv";
 $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64, d Date) ENGINE = Memory";
 
 echo "'single quote' not end, 123, 2016-01-01
-'em good, 456, 2016-01-02" | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_allow_single_quotes=0; INSERT INTO csv FORMAT CSV";
+'em good, 456, 2016-01-02" | $CLICKHOUSE_CLIENT --query="SET format_csv_allow_single_quotes=0; INSERT INTO csv FORMAT CSV";
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";
 
@@ -38,7 +38,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE csv";
 $CLICKHOUSE_CLIENT --query="CREATE TABLE csv (s String, n UInt64, d Date) ENGINE = Memory";
 
 echo '"double quote" not end, 123, 2016-01-01
-"em good, 456, 2016-01-02' | $CLICKHOUSE_CLIENT --multiquery --query="SET format_csv_allow_double_quotes=0; INSERT INTO csv FORMAT CSV";
+"em good, 456, 2016-01-02' | $CLICKHOUSE_CLIENT --query="SET format_csv_allow_double_quotes=0; INSERT INTO csv FORMAT CSV";
 
 $CLICKHOUSE_CLIENT --query="SELECT * FROM csv ORDER BY d";
 
@@ -5,4 +5,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" 2>/dev/null ||:
+${CLICKHOUSE_CLIENT} --ignore-error --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" 2>/dev/null ||:
@@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-${CLICKHOUSE_CLIENT} --multiquery --mutations_sync=1 << EOF
+${CLICKHOUSE_CLIENT} --mutations_sync=1 << EOF
 DROP TABLE IF EXISTS mutations;
 DROP TABLE IF EXISTS for_subquery;
 
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=./mergetree_mutations.lib
 . "$CURDIR"/mergetree_mutations.lib
 
-${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 --multiquery << EOF
+${CLICKHOUSE_CLIENT} --allow_nondeterministic_mutations=1 << EOF
 DROP TABLE IF EXISTS mutations_r1;
 DROP TABLE IF EXISTS for_subquery;
 
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CURDIR"/../shell_config.sh
 
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 DROP TABLE IF EXISTS view_00699;
 DROP TABLE IF EXISTS null_00699;
 
@@ -20,14 +20,14 @@ SELECT count(), min(x), max(x) FROM view_00699;
 
 ALTER TABLE null_00699 DELETE WHERE x % 2 = 0;" --mutations_sync=1
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 SELECT count(), min(x), max(x) FROM null_00699;
 SELECT count(), min(x), max(x) FROM view_00699;
 
 ALTER TABLE view_00699 DELETE WHERE x % 2 = 0;
 " --mutations_sync=1
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 SELECT count(), min(x), max(x) FROM null_00699;
 SELECT count(), min(x), max(x) FROM view_00699;
 
@@ -35,7 +35,7 @@ ALTER TABLE null_00699 DELETE WHERE x % 2 = 1;
 ALTER TABLE view_00699 DELETE WHERE x % 2 = 1;
 " --mutations_sync=1
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 SELECT count(), min(x), max(x) FROM null_00699;
 SELECT count(), min(x), max(x) FROM view_00699;
 
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 DROP TABLE IF EXISTS memory;
 CREATE TABLE memory (x UInt64) ENGINE = Memory;
 
@@ -21,13 +21,13 @@ INSERT INTO memory SELECT * FROM numbers(1000);"
 # But if the table will be dropped before query - just pass.
 # It's Ok, because otherwise the test will depend on the race condition in the test itself.
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 SET max_threads = 1;
 SELECT count() FROM memory WHERE NOT ignore(sleep(0.0001));" 2>&1 | grep -c -P '^1000$|^0$|Exception' &
 
 sleep 0.05;
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 TRUNCATE TABLE memory;
 DROP TABLE memory;
 "
@@ -16,12 +16,12 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE buffer_00763_2 (s String) ENGINE = Bu
 
 function thread1()
 {
-    seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --multiquery --ignore-error ||:
+    seq 1 500 | sed -r -e 's/.+/DROP TABLE IF EXISTS mt_00763_2; CREATE TABLE mt_00763_2 (s String) ENGINE = MergeTree ORDER BY s; INSERT INTO mt_00763_2 SELECT toString(number) FROM numbers(10);/' | ${CLICKHOUSE_CLIENT} --fsync-metadata 0 --ignore-error ||:
 }
 
 function thread2()
 {
-    seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --multiquery --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: '
+    seq 1 500 | sed -r -e 's/.+/SELECT count() FROM buffer_00763_2;/' | ${CLICKHOUSE_CLIENT} --server_logs_file='/dev/null' --ignore-error 2>&1 | grep -vP '^0$|^10$|^Received exception|^Code: 60|^Code: 218|^Code: 473' | grep -v '(query: '
 }
 
 thread1 &
@@ -23,7 +23,7 @@ function thread_alter()
     while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 300 ];
     do
        it=$((it+1))
-        $CLICKHOUSE_CLIENT --multiquery --ignore-error -q "
+        $CLICKHOUSE_CLIENT --ignore-error -q "
 ALTER TABLE mt_00763_1 MODIFY column s UInt32;
 ALTER TABLE mt_00763_1 MODIFY column s String;
 " ||:
@@ -37,7 +37,7 @@ function thread_query()
     while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 2000 ];
     do
        it=$((it+1))
-        $CLICKHOUSE_CLIENT --multiquery --ignore-error -q "
+        $CLICKHOUSE_CLIENT --ignore-error -q "
 SELECT sum(length(s)) FROM buffer_00763_1;
 " 2>&1 | grep -vP '(^3$|^Received exception from server|^Code: 473)'
     done
|
|||||||
set -eo pipefail
|
set -eo pipefail
|
||||||
|
|
||||||
# Run the client.
|
# Run the client.
|
||||||
$CLICKHOUSE_CLIENT --multiquery <<EOF
|
$CLICKHOUSE_CLIENT <<EOF
|
||||||
DROP TABLE IF EXISTS array_3dim_protobuf_00825;
|
DROP TABLE IF EXISTS array_3dim_protobuf_00825;
|
||||||
|
|
||||||
CREATE TABLE array_3dim_protobuf_00825
|
CREATE TABLE array_3dim_protobuf_00825
|
||||||
|
@@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS array_of_arrays_protobuf_00825;
 
 CREATE TABLE array_of_arrays_protobuf_00825
@@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS enum_mapping_protobuf_00825;
 
 CREATE TABLE enum_mapping_protobuf_00825
@@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS map_protobuf_00825;
 
 CREATE TABLE map_protobuf_00825
@@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS nested_in_nested_protobuf_00825;
 
 CREATE TABLE nested_in_nested_protobuf_00825 (x Nested (y Nested (z Int64))) ENGINE = MergeTree ORDER BY tuple();
@@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS nested_optional_protobuf_00825;
 
 CREATE TABLE nested_optional_protobuf_00825
@@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS no_length_delimiter_protobuf_00825;
 DROP TABLE IF EXISTS roundtrip_no_length_delimiter_protobuf_00825;
 
@@ -43,11 +43,11 @@ $CLICKHOUSE_CLIENT --query "SELECT * FROM roundtrip_no_length_delimiter_protobuf
 rm "$BINARY_FILE_PATH"
 
 # The ProtobufSingle format can't be used to write multiple rows because this format doesn't have any row delimiter.
-$CLICKHOUSE_CLIENT --multiquery > /dev/null <<EOF
+$CLICKHOUSE_CLIENT > /dev/null <<EOF
 SELECT * FROM no_length_delimiter_protobuf_00825 FORMAT ProtobufSingle SETTINGS format_schema = '$SCHEMADIR/00825_protobuf_format_no_length_delimiter:Message'; -- { clientError 546 }
 EOF
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE no_length_delimiter_protobuf_00825;
 DROP TABLE roundtrip_no_length_delimiter_protobuf_00825;
 EOF
@@ -17,7 +17,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS persons_00825;
 DROP TABLE IF EXISTS roundtrip_persons_00825;
 DROP TABLE IF EXISTS alt_persons_00825;
@@ -129,7 +129,7 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO edition2023_persons_00825 SETTINGS forma
 $CLICKHOUSE_CLIENT --query "SELECT * FROM edition2023_persons_00825 ORDER BY name"
 rm "$BINARY_FILE_PATH"
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE persons_00825;
 DROP TABLE roundtrip_persons_00825;
 DROP TABLE alt_persons_00825;
@@ -11,7 +11,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS table_skipped_column_in_nested_00825;
 
 CREATE TABLE table_skipped_column_in_nested_00825 (
@@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS splitted_nested_protobuf_00825;
 
 CREATE TABLE splitted_nested_protobuf_00825 (
@@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS squares_protobuf_00825;
 
 CREATE TABLE squares_protobuf_00825 (number UInt32, square UInt64) ENGINE = MergeTree ORDER BY tuple();
@@ -9,7 +9,7 @@ SCHEMADIR=$CURDIR/format_schemas
 set -eo pipefail
 
 # Run the client.
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS table_default_protobuf_00825;
 
 CREATE TABLE table_default_protobuf_00825
|
|||||||
COLUMNS=$(cat "$COLUMNS_FILE") || continue
|
COLUMNS=$(cat "$COLUMNS_FILE") || continue
|
||||||
|
|
||||||
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
|
${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS parquet_load"
|
||||||
$CLICKHOUSE_CLIENT --multiquery <<EOF
|
$CLICKHOUSE_CLIENT <<EOF
|
||||||
CREATE TABLE parquet_load ($COLUMNS) ENGINE = Memory;
|
CREATE TABLE parquet_load ($COLUMNS) ENGINE = Memory;
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
|
@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . "$CUR_DIR"/../shell_config.sh
 
 ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS maps"
-${CLICKHOUSE_CLIENT} --multiquery <<EOF
+${CLICKHOUSE_CLIENT} <<EOF
 CREATE TABLE maps (m1 Map(UInt32, UInt32), m2 Map(String, String), m3 Map(UInt32, Tuple(UInt32, UInt32)), m4 Map(UInt32, Array(UInt32)), m5 Array(Map(UInt32, UInt32)), m6 Tuple(Map(UInt32, UInt32), Map(String, String)), m7 Array(Map(UInt32, Array(Tuple(Map(UInt32, UInt32), Tuple(UInt32)))))) ENGINE=Memory();
 EOF
 
@@ -23,7 +23,7 @@ echo -e "\n"
 # Test that if both format_template_row_format setting and format_template_row are provided, error is thrown
 row_format_file="$CURDIR"/"${CLICKHOUSE_TEST_UNIQUE_NAME}"_template_output_format_row.tmp
 echo -ne 'Question: ${question:Quoted}, Answer: ${answer:Quoted}, Number of Likes: ${likes:Raw}, Date: ${date:Raw}' > $row_format_file
-$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
+$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
 format_template_row = '$row_format_file', \
 format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \
 format_template_rows_between_delimiter = ';\n'; --{clientError 474}"
@@ -38,7 +38,7 @@ format_template_rows_between_delimiter = ';\n'";
 # Test that if both format_template_result_format setting and format_template_resultset are provided, error is thrown
 resultset_output_file="$CURDIR"/"$CLICKHOUSE_TEST_UNIQUE_NAME"_template_output_format_resultset.tmp
 echo -ne '===== Resultset ===== \n \${data} \n ===============' > $resultset_output_file
-$CLICKHOUSE_CLIENT --multiline --multiquery --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
+$CLICKHOUSE_CLIENT --multiline --query "SELECT * FROM template GROUP BY question, answer, likes, date WITH TOTALS ORDER BY date LIMIT 3 FORMAT Template SETTINGS \
 format_template_resultset = '$resultset_output_file', \
 format_template_resultset_format = '===== Resultset ===== \n \${data} \n ===============', \
 format_template_row_format = 'Question: \${question:Quoted}, Answer: \${answer:Quoted}, Number of Likes: \${likes:Raw}, Date: \${date:Raw}', \
@@ -17,7 +17,7 @@ echo 1
 # normal execution
 $CLICKHOUSE_CLIENT \
   --query="SELECT 'find_me_TOPSECRET=TOPSECRET' FROM numbers(1) FORMAT Null" \
-  --log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1
+  --log_queries=1 --ignore-error >"$tmp_file" 2>&1
 
 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 1a'
 grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 1b'
@@ -38,7 +38,7 @@ echo 3
 # failure at before query start
 $CLICKHOUSE_CLIENT \
   --query="SELECT 1 FROM system.numbers WHERE credit_card_number='find_me_TOPSECRET=TOPSECRET' FORMAT Null" \
-  --log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > "$tmp_file"
+  --log_queries=1 --ignore-error |& grep -v '^(query: ' > "$tmp_file"
 
 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 3a'
 grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 3b'
@@ -56,7 +56,7 @@ echo 4
 # failure at the end of query
 $CLICKHOUSE_CLIENT \
   --query="SELECT 'find_me_TOPSECRET=TOPSECRET', intDiv( 100, number - 10) FROM numbers(11) FORMAT Null" \
-  --log_queries=1 --ignore-error --max_block_size=2 --multiquery |& grep -v '^(query: ' > "$tmp_file"
+  --log_queries=1 --ignore-error --max_block_size=2 |& grep -v '^(query: ' > "$tmp_file"
 
 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 4a'
 grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 4b'
@@ -67,7 +67,7 @@ rm -f "$tmp_file2" >/dev/null 2>&1
 bash -c "$CLICKHOUSE_CLIENT \
   --function_sleep_max_microseconds_per_block 60000000 \
   --query=\"select sleepEachRow(1) from numbers(10) where ignore('find_me_TOPSECRET=TOPSECRET')=0 and ignore('fwerkh_that_magic_string_make_me_unique') = 0 FORMAT Null\" \
-  --log_queries=1 --ignore-error --multiquery |& grep -v '^(query: ' > $tmp_file2" &
+  --log_queries=1 --ignore-error |& grep -v '^(query: ' > $tmp_file2" &
 
 rm -f "$tmp_file" >/dev/null 2>&1
 # check that executing query doesn't expose secrets in processlist
@@ -133,7 +133,7 @@ insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd'
 insert into sensitive select number as id, toDate('2019-01-01') as date, 'find_me_TOPSECRET=TOPSECRET' as value1, rand() as valuer from numbers(10);
 insert into sensitive select number as id, toDate('2019-01-01') as date, 'abcd' as value1, rand() as valuer from numbers(10000);
 select * from sensitive WHERE value1 = 'find_me_TOPSECRET=TOPSECRET' FORMAT Null;
-drop table sensitive;" --log_queries=1 --ignore-error --multiquery >"$tmp_file" 2>&1
+drop table sensitive;" --log_queries=1 --ignore-error >"$tmp_file" 2>&1
 
 grep -F 'find_me_[hidden]' "$tmp_file" >/dev/null || echo 'fail 8a'
 grep -F 'TOPSECRET' "$tmp_file" && echo 'fail 8b'
@@ -144,7 +144,7 @@ echo 9
 $CLICKHOUSE_CLIENT \
   --server_logs_file=/dev/null \
   --query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE event_date >= yesterday() and message like '%find_me%';
-select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error --multiquery
+select * from system.text_log where event_date >= yesterday() and message like '%TOPSECRET=TOPSECRET%';" --ignore-error
 
 echo 'finish'
 rm -f "$tmp_file" >/dev/null 2>&1
@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
 . "$CURDIR"/../shell_config.sh
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 DROP TABLE IF EXISTS src_a;
 DROP TABLE IF EXISTS src_b;
 
@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 # that test is failing on versions <= 19.11.12
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 DROP TABLE IF EXISTS lc_empty_part_bug;
 create table lc_empty_part_bug (id UInt64, s String) Engine=MergeTree ORDER BY id SETTINGS number_of_free_entries_in_pool_to_execute_mutation=0;
 insert into lc_empty_part_bug select number as id, toString(rand()) from numbers(100);
@@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} --multiquery --query="
 
 echo 'Waited for mutation to finish'
 
-${CLICKHOUSE_CLIENT} --multiquery --query="
+${CLICKHOUSE_CLIENT} --query="
 alter table lc_empty_part_bug modify column s LowCardinality(String);
 SELECT 'still alive';
 insert into lc_empty_part_bug select number+100 as id, toString(rand()) from numbers(100);
@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 
 set -e -o pipefail
 
-$CLICKHOUSE_CLIENT --multiquery <<EOF
+$CLICKHOUSE_CLIENT <<EOF
 CREATE TABLE ${CLICKHOUSE_DATABASE}.table(x Int64, y Int64, insert_time DateTime) ENGINE = MergeTree ORDER BY tuple();
 INSERT INTO ${CLICKHOUSE_DATABASE}.table VALUES (12, 102, now());
 
@@ -9,7 +9,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -9,7 +9,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -9,7 +9,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -9,7 +9,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
@@ -8,7 +8,7 @@ opts=(
     "--allow_experimental_analyzer=0"
 )
 
-$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
+$CLICKHOUSE_CLIENT "${opts[@]}" <<EOF
 SET allow_experimental_window_view = 1;
 DROP TABLE IF EXISTS mt;
 DROP TABLE IF EXISTS dst;
Some files were not shown because too many files have changed in this diff