Merge remote-tracking branch 'origin/master' into curange

Commit a2f319f0ad
@ -22,7 +22,6 @@

#### New Feature

* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
* Add new window function `percent_rank`. [#62747](https://github.com/ClickHouse/ClickHouse/pull/62747) ([lgbo](https://github.com/lgbo-ustc)).
* Support JWT authentication in `clickhouse-client` (will be available only in ClickHouse Cloud). [#62829](https://github.com/ClickHouse/ClickHouse/pull/62829) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Add SQL functions `changeYear`, `changeMonth`, `changeDay`, `changeHour`, `changeMinute`, `changeSecond`. For example, `SELECT changeMonth(toDate('2024-06-14'), 7)` returns date `2024-07-14`. [#63186](https://github.com/ClickHouse/ClickHouse/pull/63186) ([cucumber95](https://github.com/cucumber95)). (A usage sketch follows this list.)
* Introduce startup scripts, which allow the execution of preconfigured queries at the startup stage. [#64889](https://github.com/ClickHouse/ClickHouse/pull/64889) ([pufit](https://github.com/pufit)).
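A brief, illustrative sketch of two of the additions above: the `changeMonth` call repeats the example from the changelog entry itself, and the `percent_rank` query assumes the standard SQL definition `(rank - 1) / (rows - 1)` over the ordering. Both are usage sketches, not authoritative documentation.

```sql
-- changeMonth replaces only the month component of the date.
SELECT changeMonth(toDate('2024-06-14'), 7) AS changed;   -- 2024-07-14

-- percent_rank as a window function over a toy data set.
SELECT
    number,
    percent_rank() OVER (ORDER BY number) AS pr
FROM numbers(5);
```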
@ -34,17 +34,13 @@ curl https://clickhouse.com/ | sh

Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.

* [v24.7 Community Call](https://clickhouse.com/company/events/v24-7-community-release-call) - Jul 30
* [v24.8 Community Call](https://clickhouse.com/company/events/v24-8-community-release-call) - August 29

## Upcoming Events

Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
* MORE COMING SOON!

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@ -57,7 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat

if (WITH_COVERAGE)
    message (STATUS "Enabled instrumentation for code coverage")
    set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
    set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
endif()

option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
contrib/azure (vendored)
@ -1 +1 @@
Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
Subproject commit 67272b7ee0adff6b69921b26eb071ba1a353062c

contrib/libprotobuf-mutator (vendored)
@ -1 +1 @@
Subproject commit a304ec48dcf15d942607032151f7e9ee504b5dcf
Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d

contrib/rocksdb (vendored)
@ -1 +1 @@
Subproject commit be366233921293bd07a84dc4ea6991858665f202
Subproject commit 01e43568fa9f3f7bf107b2b66c00b286b456f33e
@ -5,6 +5,9 @@ if (NOT ENABLE_ROCKSDB)
    return()
endif()

# not in original build system, otherwise xxHash.cc fails to compile with ClickHouse C++23 default
set (CMAKE_CXX_STANDARD 20)

# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
option(WITH_JEMALLOC "build with JeMalloc" OFF)

@ -16,14 +19,6 @@ option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)

# third-party/folly is only validated to work on Linux and Windows for now.
# So only turn it on there by default.
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
else()
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
endif()

if(WITH_SNAPPY)
    add_definitions(-DSNAPPY)
    list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)

@ -44,7 +39,7 @@ if(WITH_ZSTD)
    list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
endif()

option(PORTABLE "build a portable binary" ON)
add_definitions(-DROCKSDB_PORTABLE)

if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
    add_definitions(-DHAVE_SSE42)

@ -59,11 +54,6 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
    # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
endif()

set (HAVE_THREAD_LOCAL 1)
if(HAVE_THREAD_LOCAL)
    add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
endif()

if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
    add_definitions(-DOS_MACOSX)
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")

@ -89,19 +79,21 @@ set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")

include_directories(${ROCKSDB_SOURCE_DIR})
include_directories("${ROCKSDB_SOURCE_DIR}/include")
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
    include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
endif()

set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_helpers.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/charged_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/compressed_secondary_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/secondary_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_contents.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_fetcher.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
|
||||
@ -113,6 +105,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/blob_source.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/c.cc
|
||||
@ -124,7 +117,11 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_service_job.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_state.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_outputs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/compaction/subcompaction_state.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/convenience.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/db_impl/compacted_db_impl.cc
|
||||
@ -159,10 +156,11 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/output_validator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/periodic_work_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/periodic_task_scheduler.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/repair.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/seqno_to_time_mapping.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
|
||||
@ -174,6 +172,8 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/db/version_set.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wide/wide_column_serialization.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/wide/wide_columns.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
|
||||
${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
|
||||
@ -182,7 +182,6 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/env/env.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/file_system.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
|
||||
${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
|
||||
@ -233,16 +232,17 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/options/options.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
|
||||
${ROCKSDB_SOURCE_DIR}/port/mmap.cc
|
||||
${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
|
||||
${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
|
||||
@ -300,9 +300,12 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
|
||||
${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/async_file_reader.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/cleanable.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/coding.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/comparator.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compression.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
|
||||
@ -311,16 +314,17 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/random.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/regex.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/slice.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/status.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/stderr_logger.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/string_util.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
|
||||
${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/agg_merge/agg_merge.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/backup/backup_engine.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
|
||||
@ -335,6 +339,7 @@ set(SOURCES
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/counted_fs.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
|
||||
${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
|
||||
@ -422,15 +427,6 @@ list(APPEND SOURCES
    "${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc"
    "${ROCKSDB_SOURCE_DIR}/env/io_posix.cc")

if(WITH_FOLLY_DISTRIBUTED_MUTEX)
    list(APPEND SOURCES
        "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp"
        "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp"
        "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp"
        "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp"
        "${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp")
endif()

add_library(_rocksdb ${SOURCES})
add_library(ch_contrib::rocksdb ALIAS _rocksdb)
target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
@ -1,16 +1,33 @@
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
/// This file was edited for ClickHouse.

#include <memory>

#include "rocksdb/version.h"
#include "rocksdb/utilities/object_registry.h"
#include "util/string_util.h"

// The build script may replace these values with real values based
// on whether or not GIT is available and the platform settings
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:0";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:master";
static const std::string rocksdb_build_date = "rocksdb_build_date:2000-01-01";
static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:72438a678872544809393b831c7273794c074215";
static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:main";
#define HAS_GIT_CHANGES 0
#if HAS_GIT_CHANGES == 0
// If HAS_GIT_CHANGES is 0, the GIT date is used.
// Use the time the branch/tag was last modified
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-12 16:01:57";
#else
// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications.
// Use the time the build was created.
static const std::string rocksdb_build_date = "rocksdb_build_date:2024-07-13 17:15:50";
#endif

extern "C" {

} // extern "C"

std::unordered_map<std::string, ROCKSDB_NAMESPACE::RegistrarFunc> ROCKSDB_NAMESPACE::ObjectRegistry::builtins_ = {

};

namespace ROCKSDB_NAMESPACE {
static void AddProperty(std::unordered_map<std::string, std::string> *props, const std::string& name) {

@ -39,12 +56,12 @@ const std::unordered_map<std::string, std::string>& GetRocksBuildProperties() {
}

std::string GetRocksVersionAsString(bool with_patch) {
    std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR);
    std::string version = std::to_string(ROCKSDB_MAJOR) + "." + std::to_string(ROCKSDB_MINOR);
    if (with_patch) {
        return version + "." + ToString(ROCKSDB_PATCH);
        return version + "." + std::to_string(ROCKSDB_PATCH);
    } else {
        return version;
    }
}
}

std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) {
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.7.1.2915"
ARG VERSION="24.7.2.13"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off
docs/changelogs/v24.3.5.46-lts.md (new file)
@ -0,0 +1,40 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.3.5.46-lts (fe54cead6b6) FIXME as compared to v24.3.4.147-lts (31a7bdc346d)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#65463](https://github.com/ClickHouse/ClickHouse/issues/65463): Reload certificate chain during certificate reload. [#61671](https://github.com/ClickHouse/ClickHouse/pull/61671) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
|
||||
* Backported in [#65882](https://github.com/ClickHouse/ClickHouse/issues/65882): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Backported in [#65302](https://github.com/ClickHouse/ClickHouse/issues/65302): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Backported in [#65892](https://github.com/ClickHouse/ClickHouse/issues/65892): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
||||
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
|
||||
* Backported in [#65283](https://github.com/ClickHouse/ClickHouse/issues/65283): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#65370](https://github.com/ClickHouse/ClickHouse/issues/65370): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Backported in [#65446](https://github.com/ClickHouse/ClickHouse/issues/65446): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#65708](https://github.com/ClickHouse/ClickHouse/issues/65708): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#65352](https://github.com/ClickHouse/ClickHouse/issues/65352): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#65327](https://github.com/ClickHouse/ClickHouse/issues/65327): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
|
||||
* Backported in [#65538](https://github.com/ClickHouse/ClickHouse/issues/65538): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
|
||||
* Backported in [#65576](https://github.com/ClickHouse/ClickHouse/issues/65576): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Backported in [#65159](https://github.com/ClickHouse/ClickHouse/issues/65159): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Backported in [#65615](https://github.com/ClickHouse/ClickHouse/issues/65615): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Backported in [#65728](https://github.com/ClickHouse/ClickHouse/issues/65728): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Backported in [#65261](https://github.com/ClickHouse/ClickHouse/issues/65261): Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Backported in [#65667](https://github.com/ClickHouse/ClickHouse/issues/65667): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#65784](https://github.com/ClickHouse/ClickHouse/issues/65784): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Backported in [#65929](https://github.com/ClickHouse/ClickHouse/issues/65929): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Backported in [#65824](https://github.com/ClickHouse/ClickHouse/issues/65824): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#65223](https://github.com/ClickHouse/ClickHouse/issues/65223): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Backported in [#65901](https://github.com/ClickHouse/ClickHouse/issues/65901): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
docs/changelogs/v24.4.4.107-stable.md (new file)
@ -0,0 +1,70 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2024
|
||||
---
|
||||
|
||||
# 2024 Changelog
|
||||
|
||||
### ClickHouse release v24.4.4.107-stable (af0ed6b197e) FIXME as compared to v24.4.3.25-stable (a915dd4eda4)
|
||||
|
||||
#### Improvement
|
||||
* Backported in [#65884](https://github.com/ClickHouse/ClickHouse/issues/65884): Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Backported in [#65303](https://github.com/ClickHouse/ClickHouse/issues/65303): Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Backported in [#65894](https://github.com/ClickHouse/ClickHouse/issues/65894): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
||||
#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
|
||||
* Backported in [#65372](https://github.com/ClickHouse/ClickHouse/issues/65372): Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Backported in [#66883](https://github.com/ClickHouse/ClickHouse/issues/66883): Fix unexpected size of low cardinality column in function calls. [#65298](https://github.com/ClickHouse/ClickHouse/pull/65298) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#65435](https://github.com/ClickHouse/ClickHouse/issues/65435): Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Backported in [#65448](https://github.com/ClickHouse/ClickHouse/issues/65448): Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#65710](https://github.com/ClickHouse/ClickHouse/issues/65710): Fix crash in maxIntersections. [#65689](https://github.com/ClickHouse/ClickHouse/pull/65689) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#66689](https://github.com/ClickHouse/ClickHouse/issues/66689): Fix the VALID UNTIL clause in the user definition resetting after a restart. Closes [#66405](https://github.com/ClickHouse/ClickHouse/issues/66405). [#66409](https://github.com/ClickHouse/ClickHouse/pull/66409) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
* Backported in [#65353](https://github.com/ClickHouse/ClickHouse/issues/65353): Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#65060](https://github.com/ClickHouse/ClickHouse/issues/65060): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#65329](https://github.com/ClickHouse/ClickHouse/issues/65329): Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
|
||||
* Backported in [#64833](https://github.com/ClickHouse/ClickHouse/issues/64833): Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#65086](https://github.com/ClickHouse/ClickHouse/issues/65086): Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#65540](https://github.com/ClickHouse/ClickHouse/issues/65540): Fix crash for `ALTER TABLE ... ON CLUSTER ... MODIFY SQL SECURITY`. [#64957](https://github.com/ClickHouse/ClickHouse/pull/64957) ([pufit](https://github.com/pufit)).
|
||||
* Backported in [#65578](https://github.com/ClickHouse/ClickHouse/issues/65578): Fix crash on destroying AccessControl: add explicit shutdown. [#64993](https://github.com/ClickHouse/ClickHouse/pull/64993) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Backported in [#65161](https://github.com/ClickHouse/ClickHouse/issues/65161): Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Backported in [#65616](https://github.com/ClickHouse/ClickHouse/issues/65616): Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Backported in [#65730](https://github.com/ClickHouse/ClickHouse/issues/65730): Eliminate injective function in argument of functions `uniq*` recursively. This used to work correctly but was broken in the new analyzer. [#65140](https://github.com/ClickHouse/ClickHouse/pull/65140) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Backported in [#65668](https://github.com/ClickHouse/ClickHouse/issues/65668): Disable `non-intersecting-parts` optimization for queries with `FINAL` in case of `read-in-order` optimization was enabled. This could lead to an incorrect query result. As a workaround, disable `do_not_merge_across_partitions_select_final` and `split_parts_ranges_into_intersecting_and_non_intersecting_final` before this fix is merged. [#65505](https://github.com/ClickHouse/ClickHouse/pull/65505) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#65786](https://github.com/ClickHouse/ClickHouse/issues/65786): Fixed bug in MergeJoin. Column in sparse serialisation might be treated as a column of its nested type though the required conversion wasn't performed. [#65632](https://github.com/ClickHouse/ClickHouse/pull/65632) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Backported in [#65810](https://github.com/ClickHouse/ClickHouse/issues/65810): Fix invalid exceptions in function `parseDateTime` with `%F` and `%D` placeholders. [#65768](https://github.com/ClickHouse/ClickHouse/pull/65768) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#65931](https://github.com/ClickHouse/ClickHouse/issues/65931): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Backported in [#65826](https://github.com/ClickHouse/ClickHouse/issues/65826): Fix a bug in short circuit logic when old analyzer and dictGetOrDefault is used. [#65802](https://github.com/ClickHouse/ClickHouse/pull/65802) ([jsc0218](https://github.com/jsc0218)).
|
||||
* Backported in [#66299](https://github.com/ClickHouse/ClickHouse/issues/66299): Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#66326](https://github.com/ClickHouse/ClickHouse/issues/66326): Add missing settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines/input_format_csv_try_infer_numbers_from_strings/input_format_csv_try_infer_strings_from_quoted_tuples` in schema inference cache because they can change the resulting schema. It prevents from incorrect result of schema inference with these settings changed. [#65980](https://github.com/ClickHouse/ClickHouse/pull/65980) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Backported in [#66153](https://github.com/ClickHouse/ClickHouse/issues/66153): Fixed buffer overflow bug in `unbin`/`unhex` implementation. [#66106](https://github.com/ClickHouse/ClickHouse/pull/66106) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Backported in [#66459](https://github.com/ClickHouse/ClickHouse/issues/66459): Fixed a bug in ZooKeeper client: a session could get stuck in unusable state after receiving a hardware error from ZooKeeper. For example, this might happen due to "soft memory limit" in ClickHouse Keeper. [#66140](https://github.com/ClickHouse/ClickHouse/pull/66140) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Backported in [#66224](https://github.com/ClickHouse/ClickHouse/issues/66224): Fix issue in SumIfToCountIfVisitor and signed integers. [#66146](https://github.com/ClickHouse/ClickHouse/pull/66146) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#66267](https://github.com/ClickHouse/ClickHouse/issues/66267): Don't throw `TIMEOUT_EXCEEDED` for `none_only_active` mode of `distributed_ddl_output_mode`. [#66218](https://github.com/ClickHouse/ClickHouse/pull/66218) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Backported in [#66678](https://github.com/ClickHouse/ClickHouse/issues/66678): Fix handling limit for `system.numbers_mt` when no index can be used. [#66231](https://github.com/ClickHouse/ClickHouse/pull/66231) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* Backported in [#66603](https://github.com/ClickHouse/ClickHouse/issues/66603): Fixed how the ClickHouse server detects the maximum number of usable CPU cores as specified by cgroups v2 if the server runs in a container such as Docker. In more detail, containers often run their process in the root cgroup which has an empty name. In that case, ClickHouse ignored the CPU limits set by cgroups v2. [#66237](https://github.com/ClickHouse/ClickHouse/pull/66237) ([filimonov](https://github.com/filimonov)).
|
||||
* Backported in [#66358](https://github.com/ClickHouse/ClickHouse/issues/66358): Fix the `Not-ready set` error when a subquery with `IN` is used in the constraint. [#66261](https://github.com/ClickHouse/ClickHouse/pull/66261) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66971](https://github.com/ClickHouse/ClickHouse/issues/66971): Fix `Column identifier is already registered` error with `group_by_use_nulls=true` and new analyzer. [#66400](https://github.com/ClickHouse/ClickHouse/pull/66400) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66968](https://github.com/ClickHouse/ClickHouse/issues/66968): Fix `Cannot find column` error for queries with constant expression in `GROUP BY` key and new analyzer enabled. [#66433](https://github.com/ClickHouse/ClickHouse/pull/66433) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66719](https://github.com/ClickHouse/ClickHouse/issues/66719): Correctly track memory for `Allocator::realloc`. [#66548](https://github.com/ClickHouse/ClickHouse/pull/66548) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#66950](https://github.com/ClickHouse/ClickHouse/issues/66950): Fix an invalid result for queries with `WINDOW`. This could happen when `PARTITION` columns have sparse serialization and window functions are executed in parallel. [#66579](https://github.com/ClickHouse/ClickHouse/pull/66579) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66947](https://github.com/ClickHouse/ClickHouse/issues/66947): Fix `Method getResultType is not supported for QUERY query node` error when scalar subquery was used as the first argument of IN (with new analyzer). [#66655](https://github.com/ClickHouse/ClickHouse/pull/66655) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#67195](https://github.com/ClickHouse/ClickHouse/issues/67195): TRUNCATE DATABASE used to stop replication as if it was a DROP DATABASE query, it's fixed. [#67129](https://github.com/ClickHouse/ClickHouse/pull/67129) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Backported in [#67377](https://github.com/ClickHouse/ClickHouse/issues/67377): Fix error `Cannot convert column because it is non constant in source stream but must be constant in result.` for a query that reads from the `Merge` table over the `Distributed` table with one shard. [#67146](https://github.com/ClickHouse/ClickHouse/pull/67146) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#67240](https://github.com/ClickHouse/ClickHouse/issues/67240): This closes [#67156](https://github.com/ClickHouse/ClickHouse/issues/67156). This closes [#66447](https://github.com/ClickHouse/ClickHouse/issues/66447). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/62907. [#67178](https://github.com/ClickHouse/ClickHouse/pull/67178) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#65410](https://github.com/ClickHouse/ClickHouse/issues/65410): Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Backported in [#65903](https://github.com/ClickHouse/ClickHouse/issues/65903): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#66385](https://github.com/ClickHouse/ClickHouse/issues/66385): Disable broken cases from 02911_join_on_nullsafe_optimization. [#66310](https://github.com/ClickHouse/ClickHouse/pull/66310) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#66424](https://github.com/ClickHouse/ClickHouse/issues/66424): Ignore subquery for IN in DDLLoadingDependencyVisitor. [#66395](https://github.com/ClickHouse/ClickHouse/pull/66395) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Backported in [#66542](https://github.com/ClickHouse/ClickHouse/issues/66542): Add additional log masking in CI. [#66523](https://github.com/ClickHouse/ClickHouse/pull/66523) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#66857](https://github.com/ClickHouse/ClickHouse/issues/66857): Fix data race in S3::ClientCache. [#66644](https://github.com/ClickHouse/ClickHouse/pull/66644) ([Konstantin Morozov](https://github.com/k-morozov)).
|
||||
* Backported in [#66873](https://github.com/ClickHouse/ClickHouse/issues/66873): Support one more case in JOIN ON ... IS NULL. [#66725](https://github.com/ClickHouse/ClickHouse/pull/66725) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#67057](https://github.com/ClickHouse/ClickHouse/issues/67057): Increase asio pool size in case the server is tiny. [#66761](https://github.com/ClickHouse/ClickHouse/pull/66761) ([alesapin](https://github.com/alesapin)).
|
||||
* Backported in [#66944](https://github.com/ClickHouse/ClickHouse/issues/66944): Small fix in realloc memory tracking. [#66820](https://github.com/ClickHouse/ClickHouse/pull/66820) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#67250](https://github.com/ClickHouse/ClickHouse/issues/67250): Followup [#66725](https://github.com/ClickHouse/ClickHouse/issues/66725). [#66869](https://github.com/ClickHouse/ClickHouse/pull/66869) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#67410](https://github.com/ClickHouse/ClickHouse/issues/67410): CI: Fix build results for release branches. [#67402](https://github.com/ClickHouse/ClickHouse/pull/67402) ([Max K.](https://github.com/maxknv)).
|
||||
|
docs/changelogs/v24.7.2.13-stable.md (new file)
@ -0,0 +1,24 @@
|
||||
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.7.2.13-stable (6e41f601b2f) FIXME as compared to v24.7.1.2915-stable (a37d2d43da7)

#### Improvement
* Backported in [#67531](https://github.com/ClickHouse/ClickHouse/issues/67531): In PR https://github.com/ClickHouse/ClickHouse/pull/66025 we introduced the setting `input_format_orc_read_use_writer_time_zone`, which makes the ORC reader use the writer's time zone instead of always using `GMT`. [#67175](https://github.com/ClickHouse/ClickHouse/pull/67175) ([kevinyhzou](https://github.com/KevinyhZou)). (A usage sketch follows this changelog excerpt.)

#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
* Backported in [#67505](https://github.com/ClickHouse/ClickHouse/issues/67505): Fix crash in DistributedAsyncInsert when connection is empty. [#67219](https://github.com/ClickHouse/ClickHouse/pull/67219) ([Pablo Marcos](https://github.com/pamarcos)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#67580](https://github.com/ClickHouse/ClickHouse/issues/67580): Fix execution of nested short-circuit functions. [#67520](https://github.com/ClickHouse/ClickHouse/pull/67520) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#67551](https://github.com/ClickHouse/ClickHouse/issues/67551): [Green CI] Fix test test_storage_s3_queue/test.py::test_max_set_age. [#67035](https://github.com/ClickHouse/ClickHouse/pull/67035) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#67514](https://github.com/ClickHouse/ClickHouse/issues/67514): Split test 02967_parallel_replicas_join_algo_and_analyzer. [#67211](https://github.com/ClickHouse/ClickHouse/pull/67211) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#67545](https://github.com/ClickHouse/ClickHouse/issues/67545): [Green CI] Fix WriteBuffer destructor when finalize has failed for MergeTreeDeduplicationLog::shutdown. [#67474](https://github.com/ClickHouse/ClickHouse/pull/67474) ([Alexey Katsman](https://github.com/alexkats)).
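A minimal sketch of how the setting from the Improvement entry above would be used; the file name is hypothetical, only the setting name comes from the entry itself.

```sql
-- Hypothetical ORC file; the setting makes the reader honor the writer's time zone.
SET input_format_orc_read_use_writer_time_zone = 1;
SELECT * FROM file('events.orc', 'ORC');
```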
@ -103,8 +103,6 @@ Default: 2

The policy for scheduling background merges and mutations. Possible values are: `round_robin` and `shortest_task_first`.

## background_merges_mutations_scheduling_policy

Algorithm used to select the next merge or mutation to be executed by the background thread pool. The policy may be changed at runtime without a server restart.
Could be applied from the `default` profile for backward compatibility.
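Since `background_merges_mutations_scheduling_policy` is a server-level setting rather than a query-level one, a quick way to see what a running server is using is to query the system table; this is a minimal sketch assuming the `system.server_settings` table exists in your version.

```sql
-- Inspect the currently effective scheduling policy (assumes system.server_settings is available).
SELECT name, value, changed
FROM system.server_settings
WHERE name = 'background_merges_mutations_scheduling_policy';
```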
@ -5609,14 +5609,8 @@ Minimal size of block to compress in CROSS JOIN. Zero value means - disable this

Default value: `1GiB`.

## restore_replace_external_engines_to_null
## disable_insertion_and_mutation

For testing purposes. Replaces all external engines to Null to not initiate external connections.
Disable all inserts and mutations (`ALTER TABLE ... UPDATE` / `ALTER TABLE ... DELETE` / `ALTER TABLE ... DROP PARTITION`). When set to true, this node serves read queries only.

Default value: `False`

## restore_replace_external_table_functions_to_null

For testing purposes. Replaces all external table functions to Null to not initiate external connections.

Default value: `False`
Default value: `false`.
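To illustrate the intent of the `disable_insertion_and_mutation` setting documented above, here is a hedged sketch of what a client session would see; it assumes the server was started with the setting enabled and that a table `t` with a column `x` already exists, and it does not show exact error messages.

```sql
-- With disable_insertion_and_mutation enabled on the server,
-- write paths are rejected while reads keep working (illustrative only).
INSERT INTO t VALUES (1);            -- expected to be rejected
ALTER TABLE t DELETE WHERE x = 1;    -- expected to be rejected
SELECT count() FROM t;               -- still served
```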
@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind

**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted).

**Returned value**
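These two documentation hunks describe the `default` parameter of the frame-aware lag/lead window functions. A small sketch of how the parameter behaves, assuming `lagInFrame` (the `leadInFrame` case is symmetric); the chosen default value 999 is arbitrary.

```sql
-- For the first row there is no previous row inside the frame,
-- so the supplied default (999) is returned instead of the column type's default.
SELECT
    number,
    lagInFrame(number, 1, toUInt64(999)) OVER (
        ORDER BY number
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS prev
FROM numbers(3);
```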
@ -23,7 +23,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind

**Parameters**
- `x` — Column name.
- `offset` — Offset to apply. [(U)Int*](../data-types/int-uint.md). (Optional - `1` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - `null` by default).
- `default` — Value to return if calculated row exceeds the boundaries of the window frame. (Optional - default value of column type when omitted).

**Returned value**
@ -80,7 +80,7 @@ namespace ErrorCodes

void applySettingsOverridesForLocal(ContextMutablePtr context)
{
    Settings settings = context->getSettings();
    Settings settings = context->getSettingsCopy();

    settings.allow_introspection_functions = true;
    settings.storage_file_read_method = LocalFSReadMethod::mmap;
@ -184,6 +184,11 @@ void LocalServer::initialize(Poco::Util::Application & self)
        cleanup_threads,
        0, // We don't need any threads once all the parts will be deleted
        cleanup_threads);

    getDatabaseCatalogDropTablesThreadPool().initialize(
        server_settings.database_catalog_drop_table_concurrency,
        0, // We don't need any threads if there are no DROP queries.
        server_settings.database_catalog_drop_table_concurrency);
}
@ -1043,6 +1043,11 @@ try
        0, // We don't need any threads once all the tables will be created
        max_database_replicated_create_table_thread_pool_size);

    getDatabaseCatalogDropTablesThreadPool().initialize(
        server_settings.database_catalog_drop_table_concurrency,
        0, // We don't need any threads if there are no DROP queries.
        server_settings.database_catalog_drop_table_concurrency);

    /// Initialize global local cache for remote filesystem.
    if (config().has("local_cache_for_remote_fs"))
    {
@ -1,2 +1,2 @@
clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS})
target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)
@ -12,38 +12,36 @@
|
||||
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/registerAggregateFunctions.h>
|
||||
|
||||
#include <base/scope_guard.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
|
||||
ContextMutablePtr context;
|
||||
|
||||
extern "C" int LLVMFuzzerInitialize(int *, char ***)
|
||||
{
|
||||
if (context)
|
||||
return true;
|
||||
|
||||
SharedContextHolder shared_context = Context::createShared();
|
||||
context = Context::createGlobal(shared_context.get());
|
||||
context->makeGlobalContext();
|
||||
|
||||
MainThreadStatus::getInstance();
|
||||
|
||||
registerAggregateFunctions();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
|
||||
{
|
||||
try
|
||||
{
|
||||
using namespace DB;
|
||||
|
||||
static SharedContextHolder shared_context;
|
||||
static ContextMutablePtr context;
|
||||
|
||||
auto initialize = [&]() mutable
|
||||
{
|
||||
if (context)
|
||||
return true;
|
||||
|
||||
shared_context = Context::createShared();
|
||||
context = Context::createGlobal(shared_context.get());
|
||||
context->makeGlobalContext();
|
||||
context->setApplicationType(Context::ApplicationType::LOCAL);
|
||||
|
||||
MainThreadStatus::getInstance();
|
||||
|
||||
registerAggregateFunctions();
|
||||
return true;
|
||||
};
|
||||
|
||||
static bool initialized = initialize();
|
||||
(void) initialized;
|
||||
|
||||
total_memory_tracker.resetCounters();
|
||||
total_memory_tracker.setHardLimit(1_GiB);
|
||||
CurrentThread::get().memory_tracker.resetCounters();
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <DataTypes/DataTypeAggregateFunction.h>
|
||||
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
@ -42,7 +43,7 @@ public:
|
||||
if (lower_name.ends_with("if"))
|
||||
return;
|
||||
|
||||
auto & function_arguments_nodes = function_node->getArguments().getNodes();
|
||||
const auto & function_arguments_nodes = function_node->getArguments().getNodes();
|
||||
if (function_arguments_nodes.size() != 1)
|
||||
return;
|
||||
|
||||
@ -50,6 +51,8 @@ public:
|
||||
if (!if_node || if_node->getFunctionName() != "if")
|
||||
return;
|
||||
|
||||
FunctionNodePtr replaced_node;
|
||||
|
||||
auto if_arguments_nodes = if_node->getArguments().getNodes();
|
||||
auto * first_const_node = if_arguments_nodes[1]->as<ConstantNode>();
|
||||
auto * second_const_node = if_arguments_nodes[2]->as<ConstantNode>();
|
||||
@ -75,8 +78,11 @@ public:
|
||||
new_arguments[0] = std::move(if_arguments_nodes[1]);
|
||||
|
||||
new_arguments[1] = std::move(if_arguments_nodes[0]);
|
||||
function_arguments_nodes = std::move(new_arguments);
|
||||
resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
|
||||
|
||||
replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
|
||||
replaced_node->getArguments().getNodes() = std::move(new_arguments);
|
||||
replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
|
||||
resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
|
||||
}
|
||||
}
|
||||
else if (first_const_node)
|
||||
@ -104,10 +110,26 @@ public:
|
||||
FunctionFactory::instance().get("not", getContext())->build(not_function->getArgumentColumns()));
|
||||
new_arguments[1] = std::move(not_function);
|
||||
|
||||
function_arguments_nodes = std::move(new_arguments);
|
||||
resolveAggregateFunctionNodeByName(*function_node, function_node->getFunctionName() + "If");
|
||||
replaced_node = std::make_shared<FunctionNode>(function_node->getFunctionName() + "If");
|
||||
replaced_node->getArguments().getNodes() = std::move(new_arguments);
|
||||
replaced_node->getParameters().getNodes() = function_node->getParameters().getNodes();
|
||||
resolveAggregateFunctionNodeByName(*replaced_node, replaced_node->getFunctionName());
|
||||
}
|
||||
}
|
||||
|
||||
if (!replaced_node)
|
||||
return;
|
||||
|
||||
auto prev_type = function_node->getResultType();
|
||||
auto curr_type = replaced_node->getResultType();
|
||||
if (!prev_type->equals(*curr_type))
|
||||
return;
|
||||
|
||||
/// Just in case, CAST compatible aggregate function states.
|
||||
if (WhichDataType(prev_type).isAggregateFunction() && !DataTypeAggregateFunction::strictEquals(prev_type, curr_type))
|
||||
node = createCastFunction(std::move(replaced_node), prev_type, getContext());
|
||||
else
|
||||
node = std::move(replaced_node);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -237,7 +237,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
    /// Remove global settings limit and offset
    if (const auto & settings_ref = updated_context->getSettingsRef(); settings_ref.limit || settings_ref.offset)
    {
        Settings settings = updated_context->getSettings();
        Settings settings = updated_context->getSettingsCopy();
        limit = settings.limit;
        offset = settings.offset;
        settings.limit = 0;
@ -503,7 +503,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
    ProfileEvents::increment(ProfileEvents::ScalarSubqueriesCacheMiss);
    auto subquery_context = Context::createCopy(context);

    Settings subquery_settings = context->getSettings();
    Settings subquery_settings = context->getSettingsCopy();
    subquery_settings.max_result_rows = 1;
    subquery_settings.extremes = false;
    subquery_context->setSettings(subquery_settings);
@ -867,7 +867,7 @@ void updateContextForSubqueryExecution(ContextMutablePtr & mutable_context)
     * max_rows_in_join, max_bytes_in_join, join_overflow_mode,
     * which are checked separately (in the Set, Join objects).
     */
    Settings subquery_settings = mutable_context->getSettings();
    Settings subquery_settings = mutable_context->getSettingsCopy();
    subquery_settings.max_result_rows = 0;
    subquery_settings.max_result_bytes = 0;
    /// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query).
@ -232,7 +232,7 @@ int IBridge::main(const std::vector<std::string> & /*args*/)
    auto context = Context::createGlobal(shared_context.get());
    context->makeGlobalContext();

    auto settings = context->getSettings();
    auto settings = context->getSettingsCopy();
    settings.set("http_max_field_value_size", http_max_field_value_size);
    context->setSettings(settings);
@ -656,7 +656,7 @@ void ClientBase::initLogsOutputStream()

void ClientBase::adjustSettings()
{
    Settings settings = global_context->getSettings();
    Settings settings = global_context->getSettingsCopy();

    /// NOTE: Do not forget to set changed=false to avoid sending it to the server (to avoid breakage read only profiles)
@ -865,7 +865,7 @@ bool ClientBase::isSyncInsertWithData(const ASTInsertQuery & insert_query, const
    if (!insert_query.data)
        return false;

    auto settings = context->getSettings();
    auto settings = context->getSettingsCopy();
    if (insert_query.settings_ast)
        settings.applyChanges(insert_query.settings_ast->as<ASTSetQuery>()->changes);
@ -2671,7 +2671,7 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name)
|
||||
|
||||
if (!getClientConfiguration().has("log_comment"))
|
||||
{
|
||||
Settings settings = client_context->getSettings();
|
||||
Settings settings = client_context->getSettingsCopy();
|
||||
/// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]"
|
||||
settings.log_comment = fs::absolute(fs::path(file_name));
|
||||
client_context->setSettings(settings);
|
||||
|
@ -42,6 +42,7 @@ public:
|
||||
size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT);
|
||||
|
||||
using Entry = IConnectionPool::Entry;
|
||||
using PoolWithFailoverBase<IConnectionPool>::isTryResultInvalid;
|
||||
|
||||
/** Allocates connection to work. */
|
||||
Entry get(const ConnectionTimeouts & timeouts) override;
|
||||
|
@@ -296,16 +296,28 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
function->getName(), toString(args), toString(captured));

ColumnsWithTypeAndName columns = captured_columns;
IFunction::ShortCircuitSettings settings;
/// Arguments of lazy executed function can also be lazy executed.
/// But we shouldn't execute arguments if this function is short circuit,
/// because it will handle lazy executed arguments by itself.
if (is_short_circuit_argument && !function->isShortCircuit(settings, args))
if (is_short_circuit_argument)
{
for (auto & col : columns)
IFunction::ShortCircuitSettings settings;
/// We shouldn't execute all arguments if this function is short circuit,
/// because it will handle lazy executed arguments by itself.
/// Execute only arguments with disabled lazy execution.
if (function->isShortCircuit(settings, args))
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column))
col = arg->reduce();
for (size_t i : settings.arguments_with_disabled_lazy_execution)
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(columns[i].column))
columns[i] = arg->reduce();
}
}
else
{
for (auto & col : columns)
{
if (const ColumnFunction * arg = checkAndGetShortCircuitArgument(col.column))
col = arg->reduce();
}
}
}

@@ -86,7 +86,10 @@ inline std::string_view toDescription(OvercommitResult result)

bool shouldTrackAllocation(Float64 probability, void * ptr)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
return intHash64(uintptr_t(ptr)) < std::numeric_limits<uint64_t>::max() * probability;
#pragma clang diagnostic pop
}

}

@@ -116,6 +116,12 @@ public:
const TryGetEntryFunc & try_get_entry,
const GetPriorityFunc & get_priority);

// Returns if the TryResult provided is an invalid one that cannot be used. Used to prevent logical errors.
bool isTryResultInvalid(const TryResult & result, bool skip_read_only_replicas) const
{
return result.entry.isNull() || !result.is_usable || (skip_read_only_replicas && result.is_readonly);
}

size_t getPoolSize() const { return nested_pools.size(); }

protected:

@@ -300,7 +306,7 @@ PoolWithFailoverBase<TNestedPool>::getMany(
throw DB::NetException(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED,
"All connection tries failed. Log: \n\n{}\n", fail_messages);

std::erase_if(try_results, [&](const TryResult & r) { return r.entry.isNull() || !r.is_usable || (skip_read_only_replicas && r.is_readonly); });
std::erase_if(try_results, [&](const TryResult & r) { return isTryResultInvalid(r, skip_read_only_replicas); });

/// Sort so that preferred items are near the beginning.
std::stable_sort(

@@ -321,6 +327,9 @@ PoolWithFailoverBase<TNestedPool>::getMany(
}
else if (up_to_date_count >= min_entries)
{
if (try_results.size() < up_to_date_count)
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Could not find enough connections for up-to-date results. Got: {}, needed: {}", try_results.size(), up_to_date_count);

/// There is enough up-to-date entries.
try_results.resize(up_to_date_count);
}
@@ -2,9 +2,11 @@

#include <Common/TimerDescriptor.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>

#include <sys/timerfd.h>
#include <unistd.h>
#include <fmt/format.h>


namespace DB

@@ -89,9 +91,29 @@ void TimerDescriptor::drain() const

/// A signal happened, need to retry.
if (errno == EINTR)
continue;
{
/** This is to help with debugging.
*
* Sometimes reading from timer_fd blocks, which should not happen, because we opened it in a non-blocking mode.
* But it could be possible if a rogue 3rd-party library closed our file descriptor by mistake
* (for example by double closing due to the lack of exception safety or if it is a crappy code in plain C)
* and then another file descriptor is opened in its place.
*
* Let's try to get a name of this file descriptor and log it.
*/
LoggerPtr log = getLogger("TimerDescriptor");

throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd");
static constexpr ssize_t max_link_path_length = 256;
char link_path[max_link_path_length];
ssize_t link_path_length = readlink(fmt::format("/proc/self/fd/{}", timer_fd).c_str(), link_path, max_link_path_length);
if (-1 == link_path_length)
throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot readlink for a timer_fd {}", timer_fd);

LOG_TRACE(log, "Received EINTR while trying to drain a TimerDescriptor, fd {}: {}", timer_fd, std::string_view(link_path, link_path_length));
continue;
}

throw ErrnoException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot drain timer_fd {}", timer_fd);
}

chassert(res == sizeof(buf));

@@ -1,4 +1,4 @@
#include "ServerSettings.h"
#include <Core/ServerSettings.h>
#include <Poco/Util/AbstractConfiguration.h>

namespace DB

@@ -66,6 +66,15 @@ namespace DB
M(Bool, async_insert_queue_flush_on_shutdown, true, "If true queue of asynchronous inserts is flushed on graceful shutdown", 0) \
M(Bool, ignore_empty_sql_security_in_create_view_query, true, "If true, ClickHouse doesn't write defaults for empty SQL security statement in CREATE VIEW queries. This setting is only necessary for the migration period and will become obsolete in 24.4", 0) \
\
/* Database Catalog */ \
M(UInt64, database_atomic_delay_before_drop_table_sec, 8 * 60, "The delay during which a dropped table can be restored using the UNDROP statement. If DROP TABLE ran with a SYNC modifier, the setting is ignored.", 0) \
M(UInt64, database_catalog_unused_dir_hide_timeout_sec, 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. If some subdirectory is not used by clickhouse-server and this directory was not modified for last database_catalog_unused_dir_hide_timeout_sec seconds, the task will 'hide' this directory by removing all access rights. It also works for directories that clickhouse-server does not expect to see inside store/. Zero means 'immediately'.", 0) \
M(UInt64, database_catalog_unused_dir_rm_timeout_sec, 30 * 24 * 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. If some subdirectory is not used by clickhouse-server and it was previously 'hidden' (see database_catalog_unused_dir_hide_timeout_sec) and this directory was not modified for last database_catalog_unused_dir_rm_timeout_sec seconds, the task will remove this directory. It also works for directories that clickhouse-server does not expect to see inside store/. Zero means 'never'.", 0) \
M(UInt64, database_catalog_unused_dir_cleanup_period_sec, 24 * 60 * 60, "Parameter of a task that cleans up garbage from store/ directory. Sets scheduling period of the task. Zero means 'never'.", 0) \
M(UInt64, database_catalog_drop_error_cooldown_sec, 5, "In case if drop table failed, ClickHouse will wait for this timeout before retrying the operation.", 0) \
M(UInt64, database_catalog_drop_table_concurrency, 16, "The size of the threadpool used for dropping tables.", 0) \
\
\
M(UInt64, max_concurrent_queries, 0, "Maximum number of concurrently executed queries. Zero means unlimited.", 0) \
M(UInt64, max_concurrent_insert_queries, 0, "Maximum number of concurrently INSERT queries. Zero means unlimited.", 0) \
M(UInt64, max_concurrent_select_queries, 0, "Maximum number of concurrently SELECT queries. Zero means unlimited.", 0) \

@@ -157,6 +166,7 @@ namespace DB
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \
M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \
M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0)

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

@@ -893,8 +893,6 @@ class IColumn;
M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \
M(Bool, restore_replace_external_engines_to_null, false, "Replace all the external table engines to Null on restore. Useful for testing purposes", 0) \
M(Bool, restore_replace_external_table_functions_to_null, false, "Replace all table functions to Null on restore. Useful for testing purposes", 0) \
\
\
/* ###################################### */ \

@@ -1075,7 +1073,7 @@ class IColumn;
M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \
M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \
M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \
M(Bool, input_format_orc_read_use_writer_time_zone, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
M(String, input_format_orc_reader_time_zone_name, "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \
M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \
M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \

@@ -69,7 +69,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"dictionary_validate_primary_key_type", false, false, "Validate primary key type for dictionaries. By default id type for simple layouts will be implicitly converted to UInt64."},
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
{"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."},
{"input_format_orc_reader_time_zone_name", "GMT", "GMT", "The time zone name for ORC row reader, the default ORC row reader's time zone is GMT."},
{"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
{"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"},

@@ -80,9 +80,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"ignore_on_cluster_for_replicated_named_collections_queries", false, false, "Ignore ON CLUSTER clause for replicated named collections management queries."},
{"backup_restore_s3_retry_attempts", 1000,1000, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries. It takes place only for backup/restore."},
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."},
{"restore_replace_external_table_functions_to_null", false, false, "New setting."},
{"restore_replace_external_engines_to_null", false, false, "New setting."}
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
}},
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
{"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},

@@ -271,9 +271,12 @@ namespace
if (d != 0.0 && !std::isnormal(d))
throw Exception(
ErrorCodes::CANNOT_PARSE_NUMBER, "A setting's value in seconds must be a normal floating point number or zero. Got {}", d);
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
if (d * 1000000 > std::numeric_limits<Poco::Timespan::TimeDiff>::max() || d * 1000000 < std::numeric_limits<Poco::Timespan::TimeDiff>::min())
throw Exception(
ErrorCodes::BAD_ARGUMENTS, "Cannot convert seconds to microseconds: the setting's value in seconds is too big: {}", d);
#pragma clang diagnostic pop

return static_cast<Poco::Timespan::TimeDiff>(d * 1000000);
}
@@ -1,2 +1,2 @@
clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms clickhouse_functions)

@@ -1,2 +1,2 @@
clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS})
target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

@@ -12,35 +12,30 @@

#include <AggregateFunctions/registerAggregateFunctions.h>

using namespace DB;


ContextMutablePtr context;

extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;

SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();

MainThreadStatus::getInstance();

registerAggregateFunctions();
return 0;
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
using namespace DB;

static SharedContextHolder shared_context;
static ContextMutablePtr context;

auto initialize = [&]() mutable
{
if (context)
return true;

shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);

MainThreadStatus::getInstance();

registerAggregateFunctions();
return true;
};

static bool initialized = initialize();
(void) initialized;

total_memory_tracker.resetCounters();
total_memory_tracker.setHardLimit(1_GiB);
CurrentThread::get().memory_tracker.resetCounters();

@@ -111,7 +111,7 @@ ASTPtr DatabaseDictionary::getCreateTableQueryImpl(const String & table_name, Co
buffer << ") Engine = Dictionary(" << backQuoteIfNeed(table_name) << ")";
}

auto settings = getContext()->getSettingsRef();
const auto & settings = getContext()->getSettingsRef();
ParserCreateQuery parser;
const char * pos = query.data();
std::string error_message;

@@ -133,7 +133,7 @@ ASTPtr DatabaseDictionary::getCreateDatabaseQuery() const
if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
buffer << " COMMENT " << backQuote(comment_value);
}
auto settings = getContext()->getSettingsRef();
const auto & settings = getContext()->getSettingsRef();
ParserCreateQuery parser;
return parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth, settings.max_parser_backtracks);
}

@@ -534,7 +534,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
{
ASTPtr ast;

auto settings = getContext()->getSettingsRef();
const auto & settings = getContext()->getSettingsRef();
{
std::lock_guard lock(mutex);
auto database_metadata_path = getContext()->getPath() + "metadata/" + escapeForFileName(database_name) + ".sql";

@@ -733,7 +733,7 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
return nullptr;
}

auto settings = local_context->getSettingsRef();
const auto & settings = local_context->getSettingsRef();
ParserCreateQuery parser;
const char * pos = query.data();
std::string error_message;

@@ -89,7 +89,7 @@ static constexpr auto MYSQL_BACKGROUND_THREAD_NAME = "MySQLDBSync";

static ContextMutablePtr createQueryContext(ContextPtr context)
{
Settings new_query_settings = context->getSettings();
Settings new_query_settings = context->getSettingsCopy();
new_query_settings.insert_allow_materialized_columns = true;

/// To avoid call AST::format

@@ -243,7 +243,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.orc.output_row_index_stride = settings.output_format_orc_row_index_stride;
format_settings.orc.use_fast_decoder = settings.input_format_orc_use_fast_decoder;
format_settings.orc.filter_push_down = settings.input_format_orc_filter_push_down;
format_settings.orc.read_use_writer_time_zone = settings.input_format_orc_read_use_writer_time_zone;
format_settings.orc.reader_time_zone_name = settings.input_format_orc_reader_time_zone_name;
format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields;
format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
format_settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference;

@@ -409,7 +409,7 @@ struct FormatSettings
bool use_fast_decoder = true;
bool filter_push_down = true;
UInt64 output_row_index_stride = 10'000;
bool read_use_writer_time_zone = false;
String reader_time_zone_name = "GMT";
} orc{};

/// For capnProto format we should determine how to

@@ -1,2 +1,2 @@
clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS})
target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions)
target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions)

@@ -20,37 +20,32 @@

#include <AggregateFunctions/registerAggregateFunctions.h>

using namespace DB;


ContextMutablePtr context;

extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;

SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();

MainThreadStatus::getInstance();

registerAggregateFunctions();
registerFormats();

return 0;
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
using namespace DB;

static SharedContextHolder shared_context;
static ContextMutablePtr context;

auto initialize = [&]() mutable
{
if (context)
return true;

shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);

MainThreadStatus::getInstance();

registerAggregateFunctions();
registerFormats();

return true;
};

static bool initialized = initialize();
(void) initialized;

total_memory_tracker.resetCounters();
total_memory_tracker.setHardLimit(1_GiB);
CurrentThread::get().memory_tracker.resetCounters();
@@ -2146,7 +2146,10 @@ struct Transformer
if constexpr (std::is_same_v<Additions, DateTimeAccurateConvertStrategyAdditions>
|| std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)
{
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
bool is_valid_input = vec_from[i] >= 0 && vec_from[i] <= 0xFFFFFFFFL;
#pragma clang diagnostic pop
if (!is_valid_input)
{
if constexpr (std::is_same_v<Additions, DateTimeAccurateOrNullConvertStrategyAdditions>)

@@ -39,7 +39,7 @@ public:
FunctionFormatQuery(ContextPtr context, String name_, OutputFormatting output_formatting_, ErrorHandling error_handling_)
: name(name_), output_formatting(output_formatting_), error_handling(error_handling_)
{
const Settings & settings = context->getSettings();
const Settings & settings = context->getSettingsRef();
max_query_size = settings.max_query_size;
max_parser_depth = settings.max_parser_depth;
max_parser_backtracks = settings.max_parser_backtracks;

@@ -143,7 +143,7 @@ ColumnPtr FunctionHasColumnInTable::executeImpl(const ColumnsWithTypeAndName & a
/* cluster_name= */ "",
/* password= */ ""
};
auto cluster = std::make_shared<Cluster>(getContext()->getSettings(), host_names, params);
auto cluster = std::make_shared<Cluster>(getContext()->getSettingsRef(), host_names, params);

// FIXME this (probably) needs a non-constant access to query context,
// because it might initialized a storage. Ideally, the tables required

@@ -217,7 +217,10 @@ private:
}

Float64 num_bytes_with_decimals = base * iter->second;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wimplicit-const-int-float-conversion"
if (num_bytes_with_decimals > std::numeric_limits<UInt64>::max())
#pragma clang diagnostic pop
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,

@@ -3,8 +3,6 @@
#include <sys/stat.h>
#include <unistd.h>

#include <fmt/format.h>

#include <Common/formatReadable.h>
#include <Common/Exception.h>
#include <base/getPageSize.h>

@@ -23,6 +23,9 @@ namespace CurrentMetrics
extern const Metric MergeTreeUnexpectedPartsLoaderThreads;
extern const Metric MergeTreeUnexpectedPartsLoaderThreadsActive;
extern const Metric MergeTreeUnexpectedPartsLoaderThreadsScheduled;
extern const Metric DatabaseCatalogThreads;
extern const Metric DatabaseCatalogThreadsActive;
extern const Metric DatabaseCatalogThreadsScheduled;
extern const Metric DatabaseReplicatedCreateTablesThreads;
extern const Metric DatabaseReplicatedCreateTablesThreadsActive;
extern const Metric DatabaseReplicatedCreateTablesThreadsScheduled;

@@ -166,4 +169,11 @@ StaticThreadPool & getDatabaseReplicatedCreateTablesThreadPool()
return instance;
}

/// ThreadPool used for dropping tables.
StaticThreadPool & getDatabaseCatalogDropTablesThreadPool()
{
static StaticThreadPool instance("DropTablesThreadPool", CurrentMetrics::DatabaseCatalogThreads, CurrentMetrics::DatabaseCatalogThreadsActive, CurrentMetrics::DatabaseCatalogThreadsScheduled);
return instance;
}

}

@@ -69,4 +69,7 @@ StaticThreadPool & getUnexpectedPartsLoadingThreadPool();
/// ThreadPool used for creating tables in DatabaseReplicated.
StaticThreadPool & getDatabaseReplicatedCreateTablesThreadPool();

/// ThreadPool used for dropping tables.
StaticThreadPool & getDatabaseCatalogDropTablesThreadPool();

}
@@ -2270,7 +2270,7 @@ bool Context::displaySecretsInShowAndSelect() const
return shared->server_settings.display_secrets_in_show_and_select;
}

Settings Context::getSettings() const
Settings Context::getSettingsCopy() const
{
SharedLockGuard lock(mutex);
return *settings;

@@ -3494,18 +3494,22 @@ DDLWorker & Context::getDDLWorker() const
if (shared->ddl_worker_startup_task)
waitLoad(shared->ddl_worker_startup_task); // Just wait and do not prioritize, because it depends on all load and startup tasks

SharedLockGuard lock(shared->mutex);
if (!shared->ddl_worker)
{
if (!hasZooKeeper())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config");

if (!hasDistributedDDL())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config");

throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized");
/// Only acquire the lock for reading ddl_worker field.
/// hasZooKeeper() and hasDistributedDDL() acquire the same lock as well and double acquisition of the lock in shared mode can lead
/// to a deadlock if an exclusive lock attempt is made in the meantime by another thread.
SharedLockGuard lock(shared->mutex);
if (shared->ddl_worker)
return *shared->ddl_worker;
}
return *shared->ddl_worker;

if (!hasZooKeeper())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no Zookeeper configuration in server config");

if (!hasDistributedDDL())
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "There is no DistributedDDL configuration in server config");

throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "DDL background thread is not initialized");
}

zkutil::ZooKeeperPtr Context::getZooKeeper() const

@@ -829,7 +829,8 @@ public:
void setMacros(std::unique_ptr<Macros> && macros);

bool displaySecretsInShowAndSelect() const;
Settings getSettings() const;
Settings getSettingsCopy() const;
const Settings & getSettingsRef() const { return *settings; }
void setSettings(const Settings & settings_);

/// Set settings by name.

@@ -954,8 +955,6 @@ public:
void makeSessionContext();
void makeGlobalContext();

const Settings & getSettingsRef() const { return *settings; }

void setProgressCallback(ProgressCallback callback);
/// Used in executeQuery() to pass it to the QueryPipeline.
ProgressCallback getProgressCallback() const;

@@ -19,6 +19,8 @@
#include <IO/ReadHelpers.h>
#include <Poco/DirectoryIterator.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Core/ServerSettings.h>
#include <IO/SharedThreadPools.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Common/atomicRename.h>

@@ -48,9 +50,6 @@
namespace CurrentMetrics
{
extern const Metric TablesToDropQueueSize;
extern const Metric DatabaseCatalogThreads;
extern const Metric DatabaseCatalogThreadsActive;
extern const Metric DatabaseCatalogThreadsScheduled;
}

namespace DB

@@ -189,13 +188,6 @@ StoragePtr TemporaryTableHolder::getTable() const

void DatabaseCatalog::initializeAndLoadTemporaryDatabase()
{
drop_delay_sec = getContext()->getConfigRef().getInt("database_atomic_delay_before_drop_table_sec", default_drop_delay_sec);
unused_dir_hide_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_hide_timeout_sec", unused_dir_hide_timeout_sec);
unused_dir_rm_timeout_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_rm_timeout_sec", unused_dir_rm_timeout_sec);
unused_dir_cleanup_period_sec = getContext()->getConfigRef().getInt64("database_catalog_unused_dir_cleanup_period_sec", unused_dir_cleanup_period_sec);
drop_error_cooldown_sec = getContext()->getConfigRef().getInt64("database_catalog_drop_error_cooldown_sec", drop_error_cooldown_sec);
drop_table_concurrency = getContext()->getConfigRef().getInt64("database_catalog_drop_table_concurrency", drop_table_concurrency);

auto db_for_temporary_and_external_tables = std::make_shared<DatabaseMemory>(TEMPORARY_DATABASE, getContext());
attachDatabase(TEMPORARY_DATABASE, db_for_temporary_and_external_tables);
}

@@ -203,7 +195,7 @@ void DatabaseCatalog::initializeAndLoadTemporaryDatabase()
void DatabaseCatalog::createBackgroundTasks()
{
/// It has to be done before databases are loaded (to avoid a race condition on initialization)
if (Context::getGlobalContextInstance()->getApplicationType() == Context::ApplicationType::SERVER && unused_dir_cleanup_period_sec)
if (Context::getGlobalContextInstance()->getApplicationType() == Context::ApplicationType::SERVER && getContext()->getServerSettings().database_catalog_unused_dir_cleanup_period_sec)
{
auto cleanup_task_holder
= getContext()->getSchedulePool().createTask("DatabaseCatalogCleanupStoreDirectoryTask", [this]() { this->cleanupStoreDirectoryTask(); });

@@ -224,7 +216,7 @@ void DatabaseCatalog::startupBackgroundTasks()
{
(*cleanup_task)->activate();
/// Do not start task immediately on server startup, it's not urgent.
(*cleanup_task)->scheduleAfter(unused_dir_hide_timeout_sec * 1000);
(*cleanup_task)->scheduleAfter(static_cast<time_t>(getContext()->getServerSettings().database_catalog_unused_dir_hide_timeout_sec) * 1000);
}

(*drop_task)->activate();

@@ -1038,15 +1030,12 @@ void DatabaseCatalog::loadMarkedAsDroppedTables()

LOG_INFO(log, "Found {} partially dropped tables. Will load them and retry removal.", dropped_metadata.size());

ThreadPool pool(CurrentMetrics::DatabaseCatalogThreads, CurrentMetrics::DatabaseCatalogThreadsActive, CurrentMetrics::DatabaseCatalogThreadsScheduled);
ThreadPoolCallbackRunnerLocal<void> runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables");
for (const auto & elem : dropped_metadata)
{
pool.scheduleOrThrowOnError([&]()
{
this->enqueueDroppedTableCleanup(elem.second, nullptr, elem.first);
});
runner([this, &elem](){ this->enqueueDroppedTableCleanup(elem.second, nullptr, elem.first); });
}
pool.wait();
runner.waitForAllToFinishAndRethrowFirstError();
}

String DatabaseCatalog::getPathForDroppedMetadata(const StorageID & table_id) const

@@ -1135,7 +1124,13 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr
}
else
{
tables_marked_dropped.push_back({table_id, table, dropped_metadata_path, drop_time + drop_delay_sec});
tables_marked_dropped.push_back
({
table_id,
table,
dropped_metadata_path,
drop_time + static_cast<time_t>(getContext()->getServerSettings().database_atomic_delay_before_drop_table_sec)
});
if (first_async_drop_in_queue == tables_marked_dropped.end())
--first_async_drop_in_queue;
}

@@ -1289,13 +1284,7 @@ void DatabaseCatalog::dropTablesParallel(std::vector<DatabaseCatalog::TablesMark
if (tables_to_drop.empty())
return;

ThreadPool pool(
CurrentMetrics::DatabaseCatalogThreads,
CurrentMetrics::DatabaseCatalogThreadsActive,
CurrentMetrics::DatabaseCatalogThreadsScheduled,
/* max_threads */drop_table_concurrency,
/* max_free_threads */0,
/* queue_size */tables_to_drop.size());
ThreadPoolCallbackRunnerLocal<void> runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables");

for (const auto & item : tables_to_drop)
{

@@ -1332,7 +1321,7 @@ void DatabaseCatalog::dropTablesParallel(std::vector<DatabaseCatalog::TablesMark
++first_async_drop_in_queue;

tables_marked_dropped.splice(tables_marked_dropped.end(), tables_marked_dropped, table_iterator);
table_iterator->drop_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()) + drop_error_cooldown_sec;
table_iterator->drop_time = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()) + getContext()->getServerSettings().database_catalog_drop_error_cooldown_sec;

if (first_async_drop_in_queue == tables_marked_dropped.end())
--first_async_drop_in_queue;

@@ -1340,25 +1329,10 @@ void DatabaseCatalog::dropTablesParallel(std::vector<DatabaseCatalog::TablesMark
}
};

try
{
pool.scheduleOrThrowOnError(std::move(job));
}
catch (...)
{
tryLogCurrentException(log, "Cannot drop tables. Will retry later.");
break;
}
runner(std::move(job));
}

try
{
pool.wait();
}
catch (...)
{
tryLogCurrentException(log, "Cannot drop tables. Will retry later.");
}
runner.waitForAllToFinishAndRethrowFirstError();
}

void DatabaseCatalog::dropTableDataTask()

@@ -1375,7 +1349,15 @@ void DatabaseCatalog::dropTableDataTask()
LOG_INFO(log, "Have {} tables in drop queue ({} of them are in use), will try drop {} tables",
drop_tables_count, drop_tables_in_use_count, tables_to_drop.size());

dropTablesParallel(tables_to_drop);
try
{
dropTablesParallel(tables_to_drop);
}
catch (...)
{
/// We don't re-throw exception, because we are in a background pool.
tryLogCurrentException(log, "Cannot drop tables. Will retry later.");
}
}

rescheduleDropTableTask();

@@ -1425,7 +1407,10 @@ void DatabaseCatalog::waitTableFinallyDropped(const UUID & uuid)
});

/// TSA doesn't support unique_lock
if (TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid))
const bool has_table = TSA_SUPPRESS_WARNING_FOR_READ(tables_marked_dropped_ids).contains(uuid);
LOG_DEBUG(log, "Done waiting for the table {} to be dropped. The outcome: {}", toString(uuid), has_table ? "table still exists" : "table dropped successfully");

if (has_table)
throw Exception(ErrorCodes::UNFINISHED, "Did not finish dropping the table with UUID {} because the server is shutting down, "
"will finish after restart", uuid);
}

@@ -1718,7 +1703,7 @@ void DatabaseCatalog::cleanupStoreDirectoryTask()
LOG_TEST(log, "Nothing to clean up from store/ on disk {}", disk_name);
}

(*cleanup_task)->scheduleAfter(unused_dir_cleanup_period_sec * 1000);
(*cleanup_task)->scheduleAfter(static_cast<time_t>(getContext()->getServerSettings().database_catalog_unused_dir_cleanup_period_sec) * 1000);
}

bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskPtr & disk, const String & unused_dir)

@@ -1742,7 +1727,7 @@ bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskP
time_t current_time = time(nullptr);
if (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO))
{
if (current_time <= max_modification_time + unused_dir_hide_timeout_sec)
if (current_time <= max_modification_time + static_cast<time_t>(getContext()->getServerSettings().database_catalog_unused_dir_hide_timeout_sec))
return false;

LOG_INFO(log, "Removing access rights for unused directory {} from disk {} (will remove it when timeout exceed)", unused_dir, disk_name);

@@ -1758,6 +1743,8 @@ bool DatabaseCatalog::maybeRemoveDirectory(const String & disk_name, const DiskP
}
else
{
auto unused_dir_rm_timeout_sec = static_cast<time_t>(getContext()->getServerSettings().database_catalog_unused_dir_rm_timeout_sec);

if (!unused_dir_rm_timeout_sec)
return false;

@@ -354,23 +354,8 @@ private:
mutable std::mutex tables_marked_dropped_mutex;

std::unique_ptr<BackgroundSchedulePoolTaskHolder> drop_task;
static constexpr time_t default_drop_delay_sec = 8 * 60;
time_t drop_delay_sec = default_drop_delay_sec;
std::condition_variable wait_table_finally_dropped;

std::unique_ptr<BackgroundSchedulePoolTaskHolder> cleanup_task;
static constexpr time_t default_unused_dir_hide_timeout_sec = 60 * 60; /// 1 hour
time_t unused_dir_hide_timeout_sec = default_unused_dir_hide_timeout_sec;
static constexpr time_t default_unused_dir_rm_timeout_sec = 30 * 24 * 60 * 60; /// 30 days
time_t unused_dir_rm_timeout_sec = default_unused_dir_rm_timeout_sec;
static constexpr time_t default_unused_dir_cleanup_period_sec = 24 * 60 * 60; /// 1 day
time_t unused_dir_cleanup_period_sec = default_unused_dir_cleanup_period_sec;

static constexpr time_t default_drop_error_cooldown_sec = 5;
time_t drop_error_cooldown_sec = default_drop_error_cooldown_sec;

static constexpr size_t default_drop_table_concurrency = 10;
size_t drop_table_concurrency = default_drop_table_concurrency;

std::unique_ptr<BackgroundSchedulePoolTaskHolder> reload_disks_task;
std::mutex reload_disks_mutex;
@@ -74,7 +74,7 @@ void ExecuteScalarSubqueriesMatcher::visit(ASTPtr & ast, Data & data)
static auto getQueryInterpreter(const ASTSubquery & subquery, ExecuteScalarSubqueriesMatcher::Data & data)
{
auto subquery_context = Context::createCopy(data.getContext());
Settings subquery_settings = data.getContext()->getSettings();
Settings subquery_settings = data.getContext()->getSettingsCopy();
subquery_settings.max_result_rows = 1;
subquery_settings.extremes = false;
subquery_context->setSettings(subquery_settings);

@@ -214,10 +214,6 @@ static void setLazyExecutionInfo(
}

lazy_execution_info.short_circuit_ancestors_info[parent].insert(indexes.begin(), indexes.end());
/// After checking arguments_with_disabled_lazy_execution, if there is no relation with parent,
/// disable the current node.
if (indexes.empty())
lazy_execution_info.can_be_lazy_executed = false;
}
else
/// If lazy execution is disabled for one of parents, we should disable it for current node.

@@ -171,7 +171,7 @@ ExpressionAnalyzer::ExpressionAnalyzer(
PreparedSetsPtr prepared_sets_,
bool is_create_parameterized_view_)
: WithContext(context_)
, query(query_), settings(getContext()->getSettings())
, query(query_), settings(getContext()->getSettingsRef())
, subquery_depth(subquery_depth_)
, syntax(syntax_analyzer_result_)
, is_create_parameterized_view(is_create_parameterized_view_)

@@ -984,7 +984,7 @@ static std::shared_ptr<IJoin> tryCreateJoin(
algorithm == JoinAlgorithm::PARALLEL_HASH ||
algorithm == JoinAlgorithm::DEFAULT)
{
const auto & settings = context->getSettings();
const auto & settings = context->getSettingsRef();

if (analyzed_join->allowParallelHashJoin())
return std::make_shared<ConcurrentHashJoin>(

@@ -5,6 +5,7 @@
#include <Access/Common/AccessRightsElement.h>
#include <Common/typeid_cast.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseReplicated.h>
#include <Databases/IDatabase.h>

@@ -47,6 +48,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int UNKNOWN_TABLE;
extern const int UNKNOWN_DATABASE;
extern const int QUERY_IS_PROHIBITED;
}

@@ -191,6 +193,12 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
"to execute ALTERs of different types (replicated and non replicated) in single query");
}

if (mutation_commands.hasNonEmptyMutationCommands() || !partition_commands.empty())
{
if (getContext()->getServerSettings().disable_insertion_and_mutation)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Mutations are prohibited");
}

if (!alter_commands.empty())
{
auto alter_lock = table->lockForAlter(getContext()->getSettingsRef().lock_acquire_timeout);

@@ -361,18 +361,10 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
TablesLoader loader{getContext()->getGlobalContext(), {{database_name, database}}, mode};
auto load_tasks = loader.loadTablesAsync();
auto startup_tasks = loader.startupTablesAsync();
if (getContext()->getGlobalContext()->getServerSettings().async_load_databases)
{
scheduleLoad(load_tasks);
scheduleLoad(startup_tasks);
}
else
{
/// First prioritize, schedule and wait all the load table tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), load_tasks);
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
/// First prioritize, schedule and wait all the load table tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), load_tasks);
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
}
catch (...)

@@ -959,40 +951,12 @@ namespace
engine_ast->no_empty_args = true;
storage.set(storage.engine, engine_ast);
}

void setNullTableEngine(ASTStorage & storage)
{
auto engine_ast = std::make_shared<ASTFunction>();
engine_ast->name = "Null";
engine_ast->no_empty_args = true;
storage.set(storage.engine, engine_ast);
}

}

void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
{
if (create.as_table_function)
{
if (getContext()->getSettingsRef().restore_replace_external_table_functions_to_null)
{
const auto & factory = TableFunctionFactory::instance();

auto properties = factory.tryGetProperties(create.as_table_function->as<ASTFunction>()->name);
if (properties && properties->allow_readonly)
return;
if (!create.storage)
{
auto storage_ast = std::make_shared<ASTStorage>();
create.set(create.storage, storage_ast);
}
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage should not be created yet, it's a bug.");
create.as_table_function = nullptr;
setNullTableEngine(*create.storage);
}
return;
}

if (create.is_dictionary || create.is_ordinary_view || create.is_live_view || create.is_window_view)
return;

@@ -1043,13 +1007,6 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
/// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one.
setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
}
/// For external tables with restore_replace_external_engine_to_null setting we replace external engines to
/// Null table engine.
else if (getContext()->getSettingsRef().restore_replace_external_engines_to_null)
{
if (StorageFactory::instance().getStorageFeatures(create.storage->engine->name).source_access_type != AccessType::NONE)
setNullTableEngine(*create.storage);
}
return;
}
@@ -3,6 +3,7 @@

#include <Access/ContextAccess.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Databases/DatabaseReplicated.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>

@@ -27,6 +28,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
extern const int QUERY_IS_PROHIBITED;
}

@@ -51,6 +53,9 @@ BlockIO InterpreterDeleteQuery::execute()
if (table->isStaticStorage())
throw Exception(ErrorCodes::TABLE_IS_READ_ONLY, "Table is read-only");

if (getContext()->getGlobalContext()->getServerSettings().disable_insertion_and_mutation)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Delete queries are prohibited");

DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name);
if (database->shouldReplicateQuery(getContext(), query_ptr))
{

@@ -6,6 +6,7 @@
#include <Interpreters/InterpreterDropQuery.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/QueryLog.h>
#include <IO/SharedThreadPools.h>
#include <Access/Common/AccessRightsElement.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTIdentifier.h>

@@ -424,18 +425,29 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
auto table_context = Context::createCopy(getContext());
table_context->setInternalQuery(true);
/// Do not hold extra shared pointers to tables
std::vector<std::pair<String, bool>> tables_to_drop;
std::vector<std::pair<StorageID, bool>> tables_to_drop;
// NOTE: This means we wait for all tables to be loaded inside getTablesIterator() call in case of `async_load_databases = true`.
for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next())
{
auto table_ptr = iterator->table();
table_ptr->flushAndPrepareForShutdown();
tables_to_drop.push_back({iterator->name(), table_ptr->isDictionary()});
tables_to_drop.push_back({table_ptr->getStorageID(), table_ptr->isDictionary()});
}

/// Prepare tables for shutdown in parallel.
ThreadPoolCallbackRunnerLocal<void> runner(getDatabaseCatalogDropTablesThreadPool().get(), "DropTables");
for (const auto & [name, _] : tables_to_drop)
{
auto table_ptr = DatabaseCatalog::instance().getTable(name, table_context);
runner([my_table_ptr = std::move(table_ptr)]()
{
my_table_ptr->flushAndPrepareForShutdown();
});
}
runner.waitForAllToFinishAndRethrowFirstError();

for (const auto & table : tables_to_drop)
{
query_for_table.setTable(table.first);
query_for_table.setTable(table.first.getTableName());
query_for_table.is_dictionary = table.second;
DatabasePtr db;
UUID table_to_wait = UUIDHelpers::Nil;

@@ -6,6 +6,7 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Columns/ColumnNullable.h>
#include <Core/Settings.h>
#include <Core/ServerSettings.h>
#include <Processors/Transforms/buildPushingToViewsChain.h>
#include <DataTypes/DataTypeNullable.h>
#include <Interpreters/DatabaseCatalog.h>

@@ -60,6 +61,7 @@ namespace ErrorCodes
extern const int NO_SUCH_COLUMN_IN_TABLE;
extern const int ILLEGAL_COLUMN;
extern const int DUPLICATE_COLUMN;
extern const int QUERY_IS_PROHIBITED;
}

InterpreterInsertQuery::InterpreterInsertQuery(

@@ -464,7 +466,7 @@ QueryPipeline InterpreterInsertQuery::buildInsertSelectPipeline(ASTInsertQuery &
* to avoid unnecessary squashing.
*/

Settings new_settings = select_context->getSettings();
Settings new_settings = select_context->getSettingsCopy();

new_settings.max_threads = std::max<UInt64>(1, settings.max_insert_threads);

@@ -732,6 +734,9 @@ BlockIO InterpreterInsertQuery::execute()
const Settings & settings = getContext()->getSettingsRef();
auto & query = query_ptr->as<ASTInsertQuery &>();

if (getContext()->getServerSettings().disable_insertion_and_mutation
&& query.table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Insert queries are prohibited");

StoragePtr table = getTable(query);
checkStorageSupportsTransactionsIfNeeded(table, getContext());

@@ -249,7 +249,7 @@ namespace
ContextPtr getSubqueryContext(const ContextPtr & context)
{
auto subquery_context = Context::createCopy(context);
Settings subquery_settings = context->getSettings();
Settings subquery_settings = context->getSettingsCopy();
subquery_settings.max_result_rows = 0;
subquery_settings.max_result_bytes = 0;
/// The calculation of extremes does not make sense and is not necessary (if you do it, then the extremes of the subquery can be taken for whole query).

@@ -308,7 +308,7 @@ std::shared_ptr<TableJoin> JoinedTables::makeTableJoin(const ASTSelectQuery & se
if (tables_with_columns.size() < 2)
return {};

auto settings = context->getSettingsRef();
const auto & settings = context->getSettingsRef();
MultiEnum<JoinAlgorithm> join_algorithm = settings.join_algorithm;
bool try_use_direct_join = join_algorithm.isSet(JoinAlgorithm::DIRECT) || join_algorithm.isSet(JoinAlgorithm::DEFAULT);
auto table_join = std::make_shared<TableJoin>(settings, context->getGlobalTemporaryVolume(), context->getTempDataOnDisk());

@@ -657,7 +657,7 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even
{
if (auto ctx = context.lock())
{
res.query_settings = std::make_shared<Settings>(ctx->getSettings());
res.query_settings = std::make_shared<Settings>(ctx->getSettingsRef());
res.current_database = ctx->getCurrentDatabase();
}
}
@@ -14,41 +14,37 @@

using namespace DB;


ContextMutablePtr context;

extern "C" int LLVMFuzzerInitialize(int *, char ***)
{
if (context)
return true;

SharedContextHolder shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();

registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions();
registerDatabases();
registerStorages();
registerDictionaries();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();

return 0;
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
try
{
std::string input = std::string(reinterpret_cast<const char*>(data), size);

static SharedContextHolder shared_context;
static ContextMutablePtr context;

auto initialize = [&]() mutable
{
if (context)
return true;

shared_context = Context::createShared();
context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
context->setApplicationType(Context::ApplicationType::LOCAL);

registerInterpreters();
registerFunctions();
registerAggregateFunctions();
registerTableFunctions();
registerDatabases();
registerStorages();
registerDictionaries();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();

return true;
};

static bool initialized = initialize();
(void) initialized;

auto io = DB::executeQuery(input, context, QueryFlags{ .internal = true }, QueryProcessingStage::Complete).second;

PullingPipelineExecutor executor(io.pipeline);

@ -62,7 +62,7 @@ std::shared_ptr<InterpreterSelectWithUnionQuery> interpretSubquery(
* which are checked separately (in the Set, Join objects).
*/
auto subquery_context = Context::createCopy(context);
Settings subquery_settings = context->getSettings();
Settings subquery_settings = context->getSettingsCopy();
subquery_settings.max_result_rows = 0;
subquery_settings.max_result_bytes = 0;
/// The calculation of `extremes` does not make sense and is not necessary (if you do it, then the `extremes` of the subquery can be taken instead of the whole query).

@ -285,6 +285,8 @@ static bool formatNamedArgWithHiddenValue(IAST * arg, const IAST::FormatSettings
void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
frame.expression_list_prepend_whitespace = false;
if (kind == Kind::CODEC || kind == Kind::STATISTICS || kind == Kind::BACKUP_NAME)
frame.allow_operators = false;
FormatStateStacked nested_need_parens = frame;
FormatStateStacked nested_dont_need_parens = frame;
nested_need_parens.need_parens = true;

@ -308,7 +310,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format

/// Should this function to be written as operator?
bool written = false;
if (arguments && !parameters && nulls_action == NullsAction::EMPTY)
if (arguments && !parameters && frame.allow_operators && nulls_action == NullsAction::EMPTY)
{
/// Unary prefix operators.
if (arguments->children.size() == 1)

@ -58,6 +58,8 @@ public:
TABLE_ENGINE,
DATABASE_ENGINE,
BACKUP_NAME,
CODEC,
STATISTICS,
};
Kind kind = Kind::ORDINARY_FUNCTION;

@ -696,6 +696,7 @@ bool ParserCodec::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

auto function_node = std::make_shared<ASTFunction>();
function_node->name = "CODEC";
function_node->kind = ASTFunction::Kind::CODEC;
function_node->arguments = expr_list_args;
function_node->children.push_back(function_node->arguments);

@ -723,6 +724,7 @@ bool ParserStatisticsType::parseImpl(Pos & pos, ASTPtr & node, Expected & expect

auto function_node = std::make_shared<ASTFunction>();
function_node->name = "STATISTICS";
function_node->kind = ASTFunction::Kind::STATISTICS;
function_node->arguments = stat_type;
function_node->children.push_back(function_node->arguments);
node = function_node;

@ -33,7 +33,9 @@ public:
{
case ASTFunction::Kind::ORDINARY_FUNCTION: findOrdinaryFunctionSecretArguments(); break;
case ASTFunction::Kind::WINDOW_FUNCTION: break;
case ASTFunction::Kind::LAMBDA_FUNCTION: break;
case ASTFunction::Kind::LAMBDA_FUNCTION: break;
case ASTFunction::Kind::CODEC: break;
case ASTFunction::Kind::STATISTICS: break;
case ASTFunction::Kind::TABLE_ENGINE: findTableEngineSecretArguments(); break;
case ASTFunction::Kind::DATABASE_ENGINE: findDatabaseEngineSecretArguments(); break;
case ASTFunction::Kind::BACKUP_NAME: findBackupNameSecretArguments(); break;

@ -256,6 +256,7 @@ public:
bool expression_list_always_start_on_new_line = false; /// Line feed and indent before expression list even if it's of single element.
bool expression_list_prepend_whitespace = false; /// Prepend whitespace (if it is required)
bool surround_each_list_element_with_parens = false;
bool allow_operators = true; /// Format some functions, such as "plus", "in", etc. as operators.
size_t list_element_index = 0;
const IAST * current_select = nullptr;
};

@ -27,7 +27,8 @@ DEFINE_BINARY_PROTO_FUZZER(const Sentence& main)
DB::ParserQueryWithOutput parser(input.data() + input.size());
try
{
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
DB::ASTPtr ast
= parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);

DB::WriteBufferFromOStream out(std::cerr, 4096);
DB::formatAST(*ast, out);

@ -14,7 +14,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
std::string input = std::string(reinterpret_cast<const char*>(data), size);

DB::ParserCreateQuery parser;
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000);
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 1000, DB::DBMS_DEFAULT_MAX_PARSER_BACKTRACKS);

const UInt64 max_ast_depth = 1000;
ast->checkDepth(max_ast_depth);

@ -900,11 +900,7 @@ bool NativeORCBlockInputFormat::prepareStripeReader()

orc::RowReaderOptions row_reader_options;
row_reader_options.includeTypes(include_indices);
if (format_settings.orc.read_use_writer_time_zone)
{
String writer_time_zone = current_stripe_info->getWriterTimezone();
row_reader_options.setTimezoneName(writer_time_zone);
}
row_reader_options.setTimezoneName(format_settings.orc.reader_time_zone_name);
row_reader_options.range(current_stripe_info->getOffset(), current_stripe_info->getLength());
if (format_settings.orc.filter_push_down && sarg)
{

@ -406,7 +406,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
{
const Block & header = getPort().getHeader();
const IDataType & type = *header.getByPosition(column_idx).type;
auto settings = context->getSettingsRef();
const auto & settings = context->getSettingsRef();

/// Advance the token iterator until the start of the column expression
readUntilTheEndOfRowAndReTokenize(column_idx);

@ -134,7 +134,6 @@ AggregatingStep::AggregatingStep(
{
output_stream->sort_description = group_by_sort_description;
output_stream->sort_scope = DataStream::SortScope::Global;
output_stream->has_single_port = true;
}
}

@ -147,7 +146,6 @@ void AggregatingStep::applyOrder(SortDescription sort_description_for_merging_,
{
output_stream->sort_description = group_by_sort_description;
output_stream->sort_scope = DataStream::SortScope::Global;
output_stream->has_single_port = true;
}

explicit_sorting_required_for_aggregation_in_order = false;

@ -10,6 +10,11 @@
namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

static ITransformingStep::Traits getTraits(bool pre_distinct)
{
const bool preserves_number_of_streams = pre_distinct;

@ -90,7 +95,8 @@ void DistinctStep::transformPipeline(QueryPipelineBuilder & pipeline, const Buil
/// final distinct for sorted stream (sorting inside and among chunks)
if (input_stream.sort_scope == DataStream::SortScope::Global)
{
assert(input_stream.has_single_port);
if (pipeline.getNumStreams() != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR, "DistinctStep with in-order expects single input");

if (distinct_sort_desc.size() < columns.size())
{

@ -39,12 +39,13 @@ FillingStep::FillingStep(
, interpolate_description(interpolate_description_)
, use_with_fill_by_sorting_prefix(use_with_fill_by_sorting_prefix_)
{
if (!input_stream_.has_single_port)
throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input");
}

void FillingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
if (pipeline.getNumStreams() != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input");

pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr
{
if (stream_type == QueryPipelineBuilder::StreamType::Totals)

@ -69,9 +70,6 @@ void FillingStep::describeActions(JSONBuilder::JSONMap & map) const

void FillingStep::updateOutputStream()
{
if (!input_streams.front().has_single_port)
throw Exception(ErrorCodes::LOGICAL_ERROR, "FillingStep expects single input");

output_stream = createOutputStream(
input_streams.front(), FillingTransform::transformHeader(input_streams.front().header, sort_description), getDataStreamTraits());
}

@ -28,9 +28,6 @@ class DataStream
public:
Block header;

/// QueryPipeline has single port. Totals or extremes ports are not counted.
bool has_single_port = false;

/// Sorting scope. Please keep the mutual order (more strong mode should have greater value).
enum class SortScope : uint8_t
{

@ -51,8 +48,7 @@ public:

bool hasEqualPropertiesWith(const DataStream & other) const
{
return has_single_port == other.has_single_port
&& sort_description == other.sort_description
return sort_description == other.sort_description
&& (sort_description.empty() || sort_scope == other.sort_scope);
}

@ -20,9 +20,6 @@ DataStream ITransformingStep::createOutputStream(
{
DataStream output_stream{.header = std::move(output_header)};

output_stream.has_single_port = stream_traits.returns_single_stream
|| (input_stream.has_single_port && stream_traits.preserves_number_of_streams);

if (stream_traits.preserves_sorting)
{
output_stream.sort_description = input_stream.sort_description;

@ -1055,7 +1055,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node,
}

auto context = read_from_merge_tree->getContext();
const auto & settings = context->getSettings();
const auto & settings = context->getSettingsRef();
if (!settings.optimize_read_in_window_order || (settings.optimize_read_in_order && settings.query_plan_read_in_order) || context->getSettingsRef().allow_experimental_analyzer)
{
return 0;

@ -6,7 +6,7 @@ namespace DB
{

ReadNothingStep::ReadNothingStep(Block output_header)
: ISourceStep(DataStream{.header = std::move(output_header), .has_single_port = true})
: ISourceStep(DataStream{.header = std::move(output_header)})
{
}

@ -16,6 +16,9 @@
#include <Common/Arena.h>
#include <Common/FieldVisitorConvertToNumber.h>
#include <Common/FieldVisitorsAccurateComparison.h>
#include <Functions/CastOverloadResolver.h>
#include <Functions/IFunction.h>
#include <DataTypes/DataTypeString.h>

#include <Poco/Logger.h>
#include <Common/logger_useful.h>

@ -78,6 +81,8 @@ public:

virtual std::optional<WindowFrame> getDefaultFrame() const { return {}; }

virtual ColumnPtr castColumn(const Columns &, const std::vector<size_t> &) { return nullptr; }

/// Is the frame type supported by this function.
virtual bool checkWindowFrameType(const WindowTransform * /*transform*/) const { return true; }
};

@ -1174,6 +1179,9 @@ void WindowTransform::appendChunk(Chunk & chunk)
// Initialize output columns.
for (auto & ws : workspaces)
{
if (ws.window_function_impl)
block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices));

block.output_columns.push_back(ws.aggregate_function->getResultType()
->createColumn());
block.output_columns.back()->reserve(block.rows);

@ -2361,6 +2369,8 @@ public:
template <bool is_lead>
struct WindowFunctionLagLeadInFrame final : public WindowFunction
{
FunctionBasePtr func_cast = nullptr;

WindowFunctionLagLeadInFrame(const std::string & name_,
const DataTypes & argument_types_, const Array & parameters_)
: WindowFunction(name_, argument_types_, parameters_, createResultType(argument_types_, name_))

@ -2388,7 +2398,17 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction
return;
}

const auto supertype = getLeastSupertype(DataTypes{argument_types[0], argument_types[2]});
if (argument_types.size() > 3)
{
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Function '{}' accepts at most 3 arguments, {} given",
name, argument_types.size());
}

if (argument_types[0]->equals(*argument_types[2]))
return;

const auto supertype = tryGetLeastSupertype(DataTypes{argument_types[0], argument_types[2]});
if (!supertype)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,

@ -2405,12 +2425,44 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction
argument_types[2]->getName());
}

if (argument_types.size() > 3)
const auto from_name = argument_types[2]->getName();
const auto to_name = argument_types[0]->getName();
ColumnsWithTypeAndName arguments
{
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Function '{}' accepts at most 3 arguments, {} given",
name, argument_types.size());
}
{ argument_types[2], "" },
{
DataTypeString().createColumnConst(0, to_name),
std::make_shared<DataTypeString>(),
""
}
};

auto get_cast_func = [&arguments]
{
FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::accurate, {});
return func_builder_cast->build(arguments);
};

func_cast = get_cast_func();

}

ColumnPtr castColumn(const Columns & columns, const std::vector<size_t> & idx) override
{
if (!func_cast)
return nullptr;

ColumnsWithTypeAndName arguments
{
{ columns[idx[2]], argument_types[2], "" },
{
DataTypeString().createColumnConst(columns[idx[2]]->size(), argument_types[0]->getName()),
std::make_shared<DataTypeString>(),
""
}
};

return func_cast->execute(arguments, argument_types[0], columns[idx[2]]->size());
}

static DataTypePtr createResultType(const DataTypes & argument_types_, const std::string & name_)

@ -2460,12 +2512,11 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction
if (argument_types.size() > 2)
{
// Column with default values is specified.
// The conversion through Field is inefficient, but we accept
// subtypes of the argument type as a default value (for convenience),
// and it's a pain to write conversion that respects ColumnNothing
// and ColumnConst and so on.
const IColumn & default_column = *current_block.input_columns[
workspace.argument_column_indices[2]].get();
const IColumn & default_column =
current_block.casted_columns[function_index] ?
*current_block.casted_columns[function_index].get() :
*current_block.input_columns[workspace.argument_column_indices[2]].get();

to.insert(default_column[transform->current_row.row]);
}
else

@ -50,6 +50,7 @@ struct WindowTransformBlock
{
Columns original_input_columns;
Columns input_columns;
Columns casted_columns;
MutableColumns output_columns;

size_t rows = 0;

@ -473,7 +473,7 @@ void MySQLHandler::comQuery(ReadBuffer & payload, bool binary_protocol)
query_context->setCurrentQueryId(fmt::format("mysql:{}:{}", connection_id, toString(UUIDHelpers::generateV4())));

/// --- Workaround for Bug 56173. Can be removed when the analyzer is on by default.
auto settings = query_context->getSettings();
auto settings = query_context->getSettingsCopy();
settings.prefer_column_name_to_alias = true;
query_context->setSettings(settings);

@ -28,6 +28,7 @@ namespace ErrorCodes
extern const int TOO_MANY_PARTITIONS;
extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int LOGICAL_ERROR;
}

/// Can the batch be split and send files from batch one-by-one instead?

@ -241,8 +242,12 @@ void DistributedAsyncInsertBatch::sendBatch(const SettingsChanges & settings_cha
insert_settings.applyChanges(settings_changes);

auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto result = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
connection = std::move(result.front().entry);
auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
auto result = results.front();
if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");

connection = std::move(result.entry);
compression_expected = connection->getCompression() == Protocol::Compression::Enable;

LOG_DEBUG(parent.log, "Sending a batch of {} files to {} ({} rows, {} bytes).",

@ -299,8 +304,12 @@ void DistributedAsyncInsertBatch::sendSeparateFiles(const SettingsChanges & sett
parent.storage.getContext()->getOpenTelemetrySpanLog());

auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto result = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
auto connection = std::move(result.front().entry);
auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName());
auto result = results.front();
if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");

auto connection = std::move(result.entry);
bool compression_expected = connection->getCompression() == Protocol::Compression::Enable;

RemoteInserter remote(*connection, timeouts,

@ -283,7 +283,7 @@ ConnectionPoolWithFailoverPtr DistributedAsyncInsertDirectoryQueue::createPool(c

auto pools = createPoolsForAddresses(addresses, pool_factory, storage.log);

const auto settings = storage.getContext()->getSettings();
const auto & settings = storage.getContext()->getSettingsRef();
return std::make_shared<ConnectionPoolWithFailover>(std::move(pools),
settings.load_balancing,
settings.distributed_replica_error_half_life.totalSeconds(),

@ -412,8 +412,12 @@ void DistributedAsyncInsertDirectoryQueue::processFile(std::string & file_path,
insert_settings.applyChanges(settings_changes);

auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings);
auto result = pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName());
auto connection = std::move(result.front().entry);
auto results = pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName());
auto result = results.front();
if (pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");

auto connection = std::move(result.entry);

LOG_DEBUG(log, "Sending `{}` to {} ({} rows, {} bytes)",
file_path,

@ -377,7 +377,11 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si
/// NOTE: INSERT will also take into account max_replica_delay_for_distributed_queries
/// (anyway fallback_to_stale_replicas_for_distributed_queries=true by default)
auto results = shard_info.pool->getManyCheckedForInsert(timeouts, settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName());
job.connection_entry = std::move(results.front().entry);
auto result = results.front();
if (shard_info.pool->isTryResultInvalid(result, settings.distributed_insert_skip_read_only_replicas))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result");

job.connection_entry = std::move(result.entry);
}
else
{

@ -5557,12 +5557,16 @@ public:
auto it = temp_part_dirs.find(part_name);
if (it == temp_part_dirs.end())
{
auto temp_part_dir = std::make_shared<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
auto temp_dir_deleter = std::make_unique<TemporaryFileOnDisk>(disk, fs::path{storage->getRelativeDataPath()} / ("tmp_restore_" + part_name + "-"));
auto temp_part_dir = fs::path{temp_dir_deleter->getRelativePath()}.filename();
/// Attaching parts will rename them so it's expected for a temporary part directory not to exist anymore in the end.
temp_part_dir->setShowWarningIfRemoved(false);
it = temp_part_dirs.emplace(part_name, temp_part_dir).first;
temp_dir_deleter->setShowWarningIfRemoved(false);
/// The following holder is needed to prevent clearOldTemporaryDirectories() from clearing `temp_part_dir` before we attach the part.
auto temp_dir_holder = storage->getTemporaryPartDirectoryHolder(temp_part_dir);
it = temp_part_dirs.emplace(part_name,
std::make_pair(std::move(temp_dir_deleter), std::move(temp_dir_holder))).first;
}
return it->second->getRelativePath();
return it->second.first->getRelativePath();
}

private:

@ -5588,7 +5592,7 @@ private:
size_t num_parts = 0;
size_t num_broken_parts = 0;
MutableDataPartsVector parts;
std::map<String /* part_name*/, std::shared_ptr<TemporaryFileOnDisk>> temp_part_dirs;
std::map<String /* part_name*/, std::pair<std::unique_ptr<TemporaryFileOnDisk>, scope_guard>> temp_part_dirs;
mutable std::mutex mutex;
};

@ -341,15 +341,19 @@ void MergeTreeDeduplicationLog::shutdown()
stopped = true;
if (current_writer)
{
/// If an error has occurred during finalize, we'd like to have the exception set for reset.
/// Otherwise, we'll be in a situation when a finalization didn't happen, and we didn't get
/// any error, causing logical error (see ~MemoryBuffer()).
try
{
current_writer->finalize();
current_writer.reset();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
current_writer.reset();
}
current_writer.reset();
}
}

@ -34,7 +34,7 @@ MergedBlockOutputStream::MergedBlockOutputStream(
, write_settings(write_settings_)
{
MergeTreeWriterSettings writer_settings(
data_part->storage.getContext()->getSettings(),
data_part->storage.getContext()->getSettingsRef(),
write_settings,
storage_settings,
data_part->index_granularity_info.mark_type.adaptive,

@ -23,7 +23,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
const MergeTreeIndexGranularityInfo * index_granularity_info)
: IMergedBlockOutputStream(data_part->storage.getSettings(), data_part->getDataPartStoragePtr(), metadata_snapshot_, columns_list_, /*reset_columns=*/ true)
{
const auto & global_settings = data_part->storage.getContext()->getSettings();
const auto & global_settings = data_part->storage.getContext()->getSettingsRef();

MergeTreeWriterSettings writer_settings(
global_settings,

@ -344,7 +344,7 @@ void ReplicatedMergeTreeRestartingThread::partialShutdown(bool part_of_full_shut
void ReplicatedMergeTreeRestartingThread::shutdown(bool part_of_full_shutdown)
{
/// Stop restarting_thread before stopping other tasks - so that it won't restart them again.
need_stop = true;
need_stop = part_of_full_shutdown;
task->deactivate();

/// Explicitly set the event, because the restarting thread will not set it again

@ -193,6 +193,7 @@ ASTPtr ColumnStatisticsDescription::getAST() const
{
auto function_node = std::make_shared<ASTFunction>();
function_node->name = "STATISTICS";
function_node->kind = ASTFunction::Kind::STATISTICS;
function_node->arguments = std::make_shared<ASTExpressionList>();
for (const auto & [type, desc] : types_to_desc)
{